diff --git a/.github/workflows/lineendings.yml b/.github/workflows/lineendings.yml
index d73ed4829..6f0ebbc51 100644
--- a/.github/workflows/lineendings.yml
+++ b/.github/workflows/lineendings.yml
@@ -1,15 +1,18 @@
-name: 'Check CAP Line Endings'
+name: "Check CAP Line Endings"
on:
push:
branches:
- - master
+ - master
pull_request:
jobs:
lineendings:
runs-on: ubuntu-latest
steps:
- - run: sudo apt-get update && sudo apt-get install dos2unix
- - uses: actions/checkout@v5
- - run: (ls core/cap-*.md | xargs -I '{}' sh -c 'cat {} | dos2unix | cmp - {}') || (echo 'Documents listed above contain CRLF line endings, and should be LF.'; exit 1)
+ - run: sudo apt-get update && sudo apt-get install dos2unix
+ - uses: actions/checkout@v5
+ - run:
+ (ls core/cap-*.md | xargs -I '{}' sh -c 'cat {} | dos2unix | cmp -
+ {}') || (echo 'Documents listed above contain CRLF line endings, and
+ should be LF.'; exit 1)
diff --git a/.github/workflows/mddiffcheck.yml b/.github/workflows/mddiffcheck.yml
index 3cce558f0..d40d05ca2 100644
--- a/.github/workflows/mddiffcheck.yml
+++ b/.github/workflows/mddiffcheck.yml
@@ -1,23 +1,21 @@
-name: 'Check CAP Diffs'
+name: "Check CAP Diffs"
on:
push:
branches:
- - master
+ - master
pull_request:
jobs:
mddiffcheck:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v5
- - name: Set up Go
- uses: actions/setup-go@v6
- with:
- go-version: ^1.24
- - run: go install github.com/stellar/mddiffcheck@v1
- - run: >
- mddiffcheck
- -repo
- 'https://github.com/stellar/stellar-core https://github.com/stellar/stellar-xdr'
- core/*.md
+ - uses: actions/checkout@v5
+ - name: Set up Go
+ uses: actions/setup-go@v6
+ with:
+ go-version: ^1.24
+ - run: go install github.com/stellar/mddiffcheck@v1
+ - run: >
+ mddiffcheck -repo 'https://github.com/stellar/stellar-core
+ https://github.com/stellar/stellar-xdr' core/*.md
diff --git a/.github/workflows/prettier.yml b/.github/workflows/prettier.yml
index 0617d0ad0..56183950d 100644
--- a/.github/workflows/prettier.yml
+++ b/.github/workflows/prettier.yml
@@ -1,4 +1,4 @@
-name: 'Prettier for SEP files'
+name: "Prettier for SEP files"
on:
pull_request:
@@ -13,6 +13,5 @@ jobs:
with:
node-version: 24
- run: corepack enable
- - run: corepack prepare yarn@stable --activate
- run: yarn install --frozen-lockfile
- run: yarn sep-check
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index ce741ca93..25eab05a3 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -1,8 +1,8 @@
-name: 'Stale Issues'
+name: "Stale Issues"
on:
schedule:
- - cron: '0 18 * * *' # approx 9:30am daily
+ - cron: "0 18 * * *" # approx 9:30am daily
jobs:
stale:
@@ -14,10 +14,16 @@ jobs:
debug-only: false
days-before-stale: 30
days-before-close: 30
- stale-issue-message: 'This issue is stale because it has been open for 30 days with no activity. It will be closed in 30 days unless the stale label is removed.'
- stale-pr-message: 'This pull request is stale because it has been open for 30 days with no activity. It will be closed in 30 days unless the stale label is removed.'
+ stale-issue-message:
+ "This issue is stale because it has been open for 30 days with no
+ activity. It will be closed in 30 days unless the stale label is
+ removed."
+ stale-pr-message:
+ "This pull request is stale because it has been open for 30 days
+ with no activity. It will be closed in 30 days unless the stale
+ label is removed."
stale-issue-label: stale
stale-pr-label: stale
- exempt-issue-labels: 'needs draft,needs pilot,accepted,blocked'
- exempt-pr-labels: 'needs draft,needs pilot,accepted,blocked'
+ exempt-issue-labels: "needs draft,needs pilot,accepted,blocked"
+ exempt-pr-labels: "needs draft,needs pilot,accepted,blocked"
remove-stale-when-updated: true
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 82e57350b..eced0b753 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,22 +2,26 @@
👍🎉 First off, thanks for taking the time to contribute! 🎉👍
-Check out the [Stellar Contribution Guide](https://github.com/stellar/.github/blob/master/CONTRIBUTING.md) for details
-on contributing to stellar-core and Stellar's other repositories, especially with regard to our code of conduct and
-contributor license agreement.
+Check out the
+[Stellar Contribution Guide](https://github.com/stellar/.github/blob/master/CONTRIBUTING.md)
+for details on contributing to stellar-core and Stellar's other repositories,
+especially with regard to our code of conduct and contributor license
+agreement.
# CAP Contribution Process
CAPs deal with changes to the core protocol of the Stellar network.
-Please see the contribution process outlined in [the core folder](core/README.md).
+Please see the contribution process outlined in
+[the core folder](core/README.md).
# SEP Contribution Process
-SEPs deal with changes to the standards, protocols, and methods used in the ecosystem built on top of the Stellar
-network.
+SEPs deal with changes to the standards, protocols, and methods used in the
+ecosystem built on top of the Stellar network.
-Please see the contribution process outlined in [the ecosystem folder](ecosystem/README.md).
+Please see the contribution process outlined in
+[the ecosystem folder](ecosystem/README.md).
-Please run `./bin/prettier.sh` or `yarn sep-prettify` (if your OS doesn't have sh) before submitting
-your PR.
+Please run `./bin/prettier.sh` or `yarn sep-prettify` (if your OS doesn't have
+sh) before submitting your PR.
diff --git a/README.md b/README.md
index ca4047b3d..84ce958ab 100644
--- a/README.md
+++ b/README.md
@@ -9,27 +9,34 @@
-This repository is home to **Core Advancement Proposals** (CAPs) and **Stellar Ecosystem Proposals**
-(SEPs).
+This repository is home to **Core Advancement Proposals** (CAPs) and **Stellar
+Ecosystem Proposals** (SEPs).
-Similar to [BIPs](https://github.com/bitcoin/bips) and [EIPs](https://github.com/ethereum/EIPs),
-CAPs and SEPs are the proposals of standards to improve the Stellar protocol and related client APIs.
+Similar to [BIPs](https://github.com/bitcoin/bips) and
+[EIPs](https://github.com/ethereum/EIPs), CAPs and SEPs are the proposals of
+standards to improve the Stellar protocol and related client APIs.
-CAPs deal with changes to the core protocol of the Stellar network. Please see [the process for CAPs](core/README.md).
+CAPs deal with changes to the core protocol of the Stellar network. Please see
+[the process for CAPs](core/README.md).
-SEPs deal with changes to the standards, protocols, and methods used in the ecosystem built on top
-of the Stellar network. Please see [the process for SEPs](ecosystem/README.md).
+SEPs deal with changes to the standards, protocols, and methods used in the
+ecosystem built on top of the Stellar network. Please see
+[the process for SEPs](ecosystem/README.md).
## Repository structure
The root directory of this repository contains:
-* Templates for creating your own CAP or SEP
-* `contents` directory with `[cap | sep]-xxxx` subdirectories that contain all media/script files for a given CAP or SEP document.
-* core directory which contains accepted CAPs (`cap-xxxx.md` where `xxxx` is a CAP number with leading zeros, ex. `cap-0051.md`)
-* ecosystem directory which contains accepted SEPs (`sep-xxxx.md` where `xxxx` is a SEP number with leading zeros, ex. `sep-0051.md`)
+- Templates for creating your own CAP or SEP
+- `contents` directory with `[cap | sep]-xxxx` subdirectories that contain all
+ media/script files for a given CAP or SEP document.
+- core directory which contains accepted CAPs (`cap-xxxx.md` where `xxxx` is a
+ CAP number with leading zeros, ex. `cap-0051.md`)
+- ecosystem directory which contains accepted SEPs (`sep-xxxx.md` where `xxxx`
+ is a SEP number with leading zeros, ex. `sep-0051.md`)
Example repository structure:
+
```
├── CONTRIBUTING.md
├── README.md
diff --git a/cap-template.md b/cap-template.md
index f0d105b99..18b1a638c 100644
--- a/cap-template.md
+++ b/cap-template.md
@@ -14,8 +14,9 @@ Protocol version: TBD
```
## Simple Summary
-"If you can't explain it simply, you don't understand it well enough." Please provide a simplified
-and layman-accessible explanation of the CAP.
+
+"If you can't explain it simply, you don't understand it well enough." Please
+provide a simplified and layman-accessible explanation of the CAP.
## Working Group
@@ -23,59 +24,79 @@ This section describes the composition of the working group.
### Recommended structure
-The recommended structure of the working group is based on the [RACI](https://en.wikipedia.org/wiki/Responsibility_assignment_matrix#Role_distinction) model.
+The recommended structure of the working group is based on the
+[RACI](https://en.wikipedia.org/wiki/Responsibility_assignment_matrix#Role_distinction)
+model.
The model contains the following roles:
- * Authors - ("Recommender" in RACI) group of people that author the CAP with the owner
- * Owner - ("Accountable" in RACI) the person that owns the CAP. This includes
- * signing off on any changes to the CAP and
- * moving the CAP through the [CAP process](core/README.md)
- * Consulted - list of people that need to be consulted and provide feedback
- * Informed - not explicitely listed, the developer mailing list allows for that.
+
+- Authors - ("Recommender" in RACI) group of people that author the CAP with
+ the owner
+- Owner - ("Accountable" in RACI) the person that owns the CAP. This includes
+ - signing off on any changes to the CAP and
+ - moving the CAP through the [CAP process](core/README.md)
+- Consulted - list of people that need to be consulted and provide feedback
+- Informed - not explicitly listed, the developer mailing list allows for
+ that.
### Example working group composition
#### Semantic protocol changes
Example:
- * adding or modifying operations
- * modifying the behavior of operations
+
+- adding or modifying operations
+- modifying the behavior of operations
The working group must include a representative set of
- * downstream systems developers (Horizon, block explorers, etc)
- * SDK developers (Go, Javascript, etc)
-In some cases, application developers (or somebody representing their interest) can also be involved.
+- downstream systems developers (Horizon, block explorers, etc)
+- SDK developers (Go, Javascript, etc)
-The motivation section should clearly show how the changes will be used end to end.
+In some cases, application developers (or somebody representing their interest)
+can also be involved.
+
+The motivation section should clearly show how the changes will be used end to
+end.
#### Ledger and historical subsystem changes
The working group must include a representative set of
- * downstream systems developers (Horizon, block explorers, etc)
- * node operators
-The motivation section should articulate the positive impact on stakeholders (the "Protocol Upgrade Transition" section can focus on other aspects).
+- downstream systems developers (Horizon, block explorers, etc)
+- node operators
+
+The motivation section should articulate the positive impact on stakeholders
+(the "Protocol Upgrade Transition" section can focus on other aspects).
## Motivation
-You should clearly explain why the existing protocol specification is inadequate to address the
-problem that the CAP solves. In particular, CAP submissions without sufficient motivation may be
-rejected outright.
+
+You should clearly explain why the existing protocol specification is
+inadequate to address the problem that the CAP solves. In particular, CAP
+submissions without sufficient motivation may be rejected outright.
### Goals Alignment
-You should reference the Stellar Network goal(s) that this proposal advances, such as:
-* The Stellar Network should run at scale and at low cost to all participants of the network.
-* The Stellar Network should enable cross-border payments.
+
+You should reference the Stellar Network goal(s) that this proposal advances,
+such as:
+
+- The Stellar Network should run at scale and at low cost to all participants
+ of the network.
+- The Stellar Network should enable cross-border payments.
## Abstract
+
A short (~200 word) description of the technical issue being addressed.
## Specification
-The technical specification should describe the syntax and semantics of any new feature.
+
+The technical specification should describe the syntax and semantics of any new
+feature.
### XDR changes
-This section includes all changes to the XDR (`.x` files), presented as a "diff"
-against the latest version of the protocol (or in some rare exception,
+
+This section includes all changes to the XDR (`.x` files), presented as a
+"diff" against the latest version of the protocol (or in some rare exception,
on top of a different CAP). Diffs should be generated against on the XDR in the
[stellar-core repository].
@@ -83,61 +104,87 @@ To generate diffs, use the `git diff` command.
To apply diffs, use the `git apply --reject --whitespace=fix` command.
-For large changes, it may be beneficial to link to actual XDR files copied
-in the relevant "contents" folder.
+For large changes, it may be beneficial to link to actual XDR files copied in
+the relevant "contents" folder.
#### XDR Example
-If this update to XDR or any logical change to meta will have any effect on Horizon, RPC, etc., include some base64-encoded `LedgerCloseMeta` examples. The examples should reflect the updated XDR structures for downstream testing purposes. The feasibility of generating this XDR will depend on the XDR change itself, but the CAP author should do their best to give downstream consumers enough to test with so they don't need to wait for the Stellar Core implementation before starting testing.
-The `stellar-xdr` CLI can convert between base64 XDR and JSON, so that can be used to modify an existing testnet or mainnet `LedgerCloseMeta` to use as an example. Note that you may need to build or install a version of `stellar-xdr` that contains the changes in this CAP.
+If this update to XDR or any logical change to meta will have any effect on
+Horizon, RPC, etc., include some base64-encoded `LedgerCloseMeta` examples. The
+examples should reflect the updated XDR structures for downstream testing
+purposes. The feasibility of generating this XDR will depend on the XDR change
+itself, but the CAP author should do their best to give downstream consumers
+enough to test with so they don't need to wait for the Stellar Core
+implementation before starting testing.
+
+The `stellar-xdr` CLI can convert between base64 XDR and JSON, so that can be
+used to modify an existing testnet or mainnet `LedgerCloseMeta` to use as an
+example. Note that you may need to build or install a version of `stellar-xdr`
+that contains the changes in this CAP.
-This section should be filled out by the time the XDR is agreed on in a protocol meeting.
+This section should be filled out by the time the XDR is agreed on in a
+protocol meeting.
### Semantics
-This section includes subsections, one for each logical change included in the XDR changes,
-that describes how each new or changed type functions and is used, and for new operations
-a step-by-step description of what happens when the operation is executed.
+
+This section includes subsections, one for each logical change included in the
+XDR changes, that describes how each new or changed type functions and is used,
+and for new operations a step-by-step description of what happens when the
+operation is executed.
## Design Rationale
-The rationale fleshes out the specification by describing what motivated the design and why
-particular design decisions were made. It should describe alternate designs that were considered
-and related work, e.g. how the feature is supported in other protocols. The rationale may also
-provide evidence of consensus within the community, and should discuss important objections or
+
+The rationale fleshes out the specification by describing what motivated the
+design and why particular design decisions were made. It should describe
+alternate designs that were considered and related work, e.g. how the feature
+is supported in other protocols. The rationale may also provide evidence of
+consensus within the community, and should discuss important objections or
concerns raised during discussion.
## Protocol Upgrade Transition
-Typically CAPs have a direct impact on core that should be well understood,
-and indirect impact on other systems in the ecosystem (Horizon, SDKs,
-application, etc).
-The following sections look at common challenges associated with those
-protocol transitions.
+Typically CAPs have a direct impact on core that should be well understood, and
+indirect impact on other systems in the ecosystem (Horizon, SDKs, application,
+etc).
+
+The following sections look at common challenges associated with those protocol
+transitions.
### Backwards Incompatibilities
-All CAPs that introduce backwards incompatibilities must include a section describing these
-incompatibilities and their severity.
-The CAP must propose how to deal with these incompatibilities, potentially pointing to other standard documents that complements the CAP (for example SEPs).
+All CAPs that introduce backwards incompatibilities must include a section
+describing these incompatibilities and their severity.
-CAP submissions with an insufficient discussion of backwards compatibility
-may be rejected outright.
+The CAP must propose how to deal with these incompatibilities, potentially
+pointing to other standard documents that complement the CAP (for example
+SEPs).
+
+CAP submissions with an insufficient discussion of backwards compatibility may
+be rejected outright.
### Resource Utilization
+
Reasonable effort should be made to understand the impact of the CAP on
resource utilization like CPU, memory, network bandwidth and disk/database.
## Security Concerns
-All CAPs should carefully consider areas where security may be a concern, and document them
-accordingly. If a change does not have security implications, briefly explain why.
+
+All CAPs should carefully consider areas where security may be a concern, and
+document them accordingly. If a change does not have security implications,
+briefly explain why.
## Test Cases
-Test cases for an implementation are mandatory for CAPs that are affecting consensus changes. Other
-CAPs can choose to include links to test cases if applicable.
+
+Test cases for an implementation are mandatory for CAPs that are affecting
+consensus changes. Other CAPs can choose to include links to test cases if
+applicable.
## Implementation
-The implementation(s) must be completed before any CAP is given "Final" status, but it need not be
-completed before the CAP is accepted. While there is merit to the approach of reaching consensus on
-the specification and rationale before writing code, the principle of "rough consensus and running
-code" is still useful when it comes to resolving many discussions of API details.
+
+The implementation(s) must be completed before any CAP is given "Final" status,
+but it need not be completed before the CAP is accepted. While there is merit
+to the approach of reaching consensus on the specification and rationale before
+writing code, the principle of "rough consensus and running code" is still
+useful when it comes to resolving many discussions of API details.
[stellar-core repository]: https://github.com/stellar/stellar-core
diff --git a/contents/sep-0042/assetlist.schema.json b/contents/sep-0042/assetlist.schema.json
index bf3224cb8..d6ff88a92 100644
--- a/contents/sep-0042/assetlist.schema.json
+++ b/contents/sep-0042/assetlist.schema.json
@@ -16,7 +16,7 @@
"minLength": 5,
"description": "Short descriptive title of the list"
},
- "network":{
+ "network": {
"type": "string",
"enum": ["public", "testnet"]
},
@@ -106,32 +106,19 @@
"description": "Alerts, messages, or other additional information specified by the provider"
}
},
- "required": [
- "name",
- "org"
- ],
+ "required": ["name", "org"],
"anyOf": [
{
- "required": [
- "contract"
- ]
+ "required": ["contract"]
},
{
- "required": [
- "code",
- "issuer"
- ]
+ "required": ["code", "issuer"]
}
],
"additionalProperties": false
}
}
},
- "required": [
- "name",
- "provider",
- "version",
- "assets"
- ],
+ "required": ["name", "provider", "version", "assets"],
"additionalProperties": false
-}
\ No newline at end of file
+}
diff --git a/core/README.md b/core/README.md
index 17b3e593c..e9066d489 100644
--- a/core/README.md
+++ b/core/README.md
@@ -3,186 +3,209 @@
## CAP Status Terms
### Primary Workflow
-- **Draft** — A CAP that is currently open for consideration and actively being discussed.
-- **Awaiting Decision** — A mature and ready CAP that is ready for final deliberation by the CAP
- Core Team. After a maximum of three meetings, a vote will take place that will set the CAP's
- intended FCP disposition (**FCP: Acceptance/Rejection**) or go back into a **Draft** state.
-- **FCP: [Acceptance/Rejection]** — A CAP that has entered a Final Comment Period (FCP) with an
- intended disposition. After one week has passed, during which any new concerns should be
- addressed, the CAP will head towards its intended disposition [**Acceptance/Rejection**] or go
- back into a Draft state.
-- **Accepted** — A CAP that has been accepted on the merits of its idea pre-implementation, and is
- ready for implementation. It is still possible that the CAP may be rejected post-implementation
- due to the issues that may arise during an initial implementation.
-- **Implemented** - A CAP that has been implemented with the protocol version specified in the CAP. It will graduate to
- **Final** when it has been formally accepted by a majority of validators (nodes) on the network.
-- **Final** — A CAP that has been accepted by a majority of validators (nodes) on the network. A
- final CAP should only be updated to correct errata.
+
+- **Draft** — A CAP that is currently open for consideration and actively being
+ discussed.
+- **Awaiting Decision** — A mature and ready CAP that is ready for final
+ deliberation by the CAP Core Team. After a maximum of three meetings, a vote
+ will take place that will set the CAP's intended FCP disposition (**FCP:
+ Acceptance/Rejection**) or go back into a **Draft** state.
+- **FCP: [Acceptance/Rejection]** — A CAP that has entered a Final Comment
+ Period (FCP) with an intended disposition. After one week has passed, during
+ which any new concerns should be addressed, the CAP will head towards its
+ intended disposition [**Acceptance/Rejection**] or go back into a Draft
+ state.
+- **Accepted** — A CAP that has been accepted on the merits of its idea
+ pre-implementation, and is ready for implementation. It is still possible
+ that the CAP may be rejected post-implementation due to the issues that may
+ arise during an initial implementation.
+- **Implemented** - A CAP that has been implemented with the protocol version
+ specified in the CAP. It will graduate to **Final** when it has been formally
+ accepted by a majority of validators (nodes) on the network.
+- **Final** — A CAP that has been accepted by a majority of validators (nodes)
+ on the network. A final CAP should only be updated to correct errata.
### Additional Statuses
-- **Rejected** - A CAP that has been formally rejected by the CAP Core Team, and will not be
- implemented.
-- **Superseded: [New Final CAP]** - A CAP that which was previously final but has been superseded
- by a new, final CAP. Both CAPs should reference each other.
+
+- **Rejected** - A CAP that has been formally rejected by the CAP Core Team,
+ and will not be implemented.
+- **Superseded: [New Final CAP]** - A CAP which was previously final but
+ has been superseded by a new, final CAP. Both CAPs should reference each
+ other.
## List of Proposals
-| Number | Protocol Version | Title | Author | Status |
-| ---- | --- | --- | --- | --- |
-| [CAP-0001](cap-0001.md) | 10 | Bump Sequence | Nicolas Barry | Final |
-| [CAP-0002](cap-0002.md) | 10 | Transaction level signature verification | Nicolas Barry | Final |
-| [CAP-0003](cap-0003.md) | 10 | Asset-backed offers | Jonathan Jove | Final |
-| [CAP-0004](cap-0004.md) | 10 | Improved Rounding for Cross Offer | Jonathan Jove | Final |
-| [CAP-0005](cap-0005.md) | 11 | Throttling and transaction pricing improvements | Nicolas Barry | Final |
-| [CAP-0006](cap-0006.md) | 11 | Add ManageBuyOffer Operation | Jonathan Jove | Final |
-| [CAP-0015](cap-0015.md) | 13 | Fee Bump Transactions | OrbitLens | Final |
-| [CAP-0017](cap-0017.md) | - | Update LastModifiedLedgerSeq If and Only If LedgerEntry is Modified | Jonathan Jove | Accepted |
-| [CAP-0018](cap-0018.md) | 13 | Fine-Grained Control of Authorization | Jonathan Jove | Final |
-| [CAP-0019](cap-0019.md) | 13 | Future-upgradable TransactionEnvelope type | David Mazières | Final |
-| [CAP-0020](cap-0020.md) | 11 | Bucket Initial Entries | Graydon Hoare | Final |
-| [CAP-0021](cap-0021.md) | 19 | Generalized transaction preconditions | David Mazières | Final |
-| [CAP-0023](cap-0023.md) | 14 | Two-Part Payments with ClaimableBalanceEntry | Jonathan Jove | Final |
-| [CAP-0024](cap-0024.md) | 12 | Make PathPayment Symmetrical | Jed McCaleb | Final |
-| [CAP-0025](cap-0025.md) | 12 | Remove Bucket Shadowing | Marta Lokhava | Final |
-| [CAP-0026](cap-0026.md) | 12 | Disable Inflation Mechanism | OrbitLens | Final |
-| [CAP-0027](cap-0027.md) | 13 | First-class multiplexed accounts | David Mazières and Tomer Weller | Final |
-| [CAP-0028](cap-0028.md) | 13 | Clear pre-auth transaction signer on failed transactions | Siddharth Suresh | Final |
-| [CAP-0029](cap-0029.md) | 16 | AllowTrust when not AUTH_REQUIRED | Tomer Weller | Final |
-| [CAP-0030](cap-0030.md) | 13 | Remove NO_ISSUER Operation Results | Siddharth Suresh | Final |
-| [CAP-0033](cap-0033.md) | 14/15 | Sponsored Reserve with EphemeralSponsorshipEntry | Jonathan Jove | Final |
-| [CAP-0034](cap-0034.md) | 14 | Preserve Transaction-Set/Close-Time Affinity During Nomination | Terence Rokop | Final |
-| [CAP-0035](cap-0035.md) | 17 | Asset Clawback | Dan Doney | Final |
-| [CAP-0038](cap-0038.md) | 18 | Automated Market Makers | Jonathan Jove | Final |
-| [CAP-0040](cap-0040.md) | 19 | Ed25519 Signed Payload Signer for Transaction Signature Disclosure | Leigh McCulloch | Final |
-| [CAP-0042](cap-0042.md) | - | Multi-Part Transaction Sets | Nicolas Barry | Final |
-| [CAP-0046](cap-0046.md) | 20 |Soroban smart contract system overview | Graydon Hoare | Final |
-| [CAP-0046-01 (formerly 0046)](cap-0046-01.md) | 20 | WebAssembly Smart Contract Runtime Environment | Graydon Hoare | Final |
-| [CAP-0046-02 (formerly 0047)](cap-0046-02.md) | 20 | Smart Contract Lifecycle | Siddharth Suresh | Final |
-| [CAP-0046-03 (formerly 0051)](cap-0046-03.md) | 20 | Smart Contract Host Functons | Jay Geng | Final |
-| [CAP-0046-05 (formerly 0053)](cap-0046-05.md) | 20 | Smart Contract Data | Graydon Hoare | Final |
-| [CAP-0046-06 (formerly 0054)](cap-0046-06.md) | 20 | Smart Contract Standardized Asset | Jonathan Jove | Final |
-| [CAP-0046-07 (formerly 0055)](cap-0046-07.md) | 20 | Fee model in smart contracts | Nicolas Barry | Final |
-| [CAP-0046-08 (formerly 0056)](cap-0046-08.md) | 20 |Smart Contract Logging | Siddharth Suresh | Final |
-| [CAP-0046-09](cap-0046-09.md) | 20 | Network Configuration Ledger Entries | Dmytro Kozhevin | Final |
-| [CAP-0046-10](cap-0046-10.md) | 20 | Smart Contract Budget Metering | Jay Geng | Final |
-| [CAP-0046-11](cap-0046-11.md) | 20 | Soroban Authorization Framework | Dmytro Kozhevin | Final |
-| [CAP-0046-12](cap-0046-12.md) | 20 | Soroban State Archival Interface | Garand Tyson | Final |
-| [CAP-0051](cap-0051.md) | 21 | Smart Contract Host Functionality: Secp256r1 Verification | Leigh McCulloch | Final |
-| [CAP-0053](cap-0053.md) | 21 | Separate host functions to extend the TTL for contract instance and contract code | Tommaso De Ponti | Final |
-| [CAP-0054](cap-0054.md) | 21 | Soroban refined VM instantiation cost model | Graydon Hoare | Final |
-| [CAP-0055](cap-0055.md) | 21 | Soroban streamlined linking | Graydon Hoare | Final |
-| [CAP-0056](cap-0056.md) | 21 | Soroban intra-transaction module caching | Graydon Hoare | Final |
-| [CAP-0058](cap-0058.md) | 22 | Constructors for Soroban Contracts | Dmytro Kozhevin | Final |
-| [CAP-0059](cap-0059.md) | 22 | Host functions for BLS12-381 | Jay Geng | Final |
-| [CAP-0062](cap-0062.md) | 23 | Soroban Live State Prioritization | Garand Tyson | Final |
-| [CAP-0063](cap-0063.md) | 23 | Parallelism-friendly Transaction Scheduling | Dmytro Kozhevin | Final |
-| [CAP-0065](cap-0065.md) | 23 | Reusable Module Cache | Graydon Hoare | Final |
-| [CAP-0066](cap-0066.md) | 23 | Soroban In-memory Read Resource | Garand Tyson | Final |
-| [CAP-0067](cap-0067.md) | 23 | Unified Asset Events | Siddharth Suresh | Final |
-| [CAP-0068](cap-0068.md) | 23 | Host function for getting executable for `Address` | Dmytro Kozhevin | Final |
-| [CAP-0069](cap-0069.md) | 23 | String/Bytes conversion host functions | Dmytro Kozhevin | Final |
-| [CAP-0070](cap-0070.md) | 23 | Configurable SCP Timing Parameters | Garand Tyson | Final |
-| [CAP-0074](cap-0074.md) | Host functions for BN254 | Siddharth Suresh | Awaiting Decision |
-| [CAP-0075](cap-0075.md) | Cryptographic Primitives for Poseidon/Poseidon2 Hash Functions | Jay Geng | Awaiting Decision |
-| [CAP-0076](cap-0076.md) | P23 State Archival bug remediation | Dmytro Kozhevin | Final |
+| Number | Protocol Version | Title | Author | Status |
+| --------------------------------------------- | -------------------------------------------------------------- | --------------------------------------------------------------------------------- | ------------------------------- | -------- |
+| [CAP-0001](cap-0001.md) | 10 | Bump Sequence | Nicolas Barry | Final |
+| [CAP-0002](cap-0002.md) | 10 | Transaction level signature verification | Nicolas Barry | Final |
+| [CAP-0003](cap-0003.md) | 10 | Asset-backed offers | Jonathan Jove | Final |
+| [CAP-0004](cap-0004.md) | 10 | Improved Rounding for Cross Offer | Jonathan Jove | Final |
+| [CAP-0005](cap-0005.md) | 11 | Throttling and transaction pricing improvements | Nicolas Barry | Final |
+| [CAP-0006](cap-0006.md) | 11 | Add ManageBuyOffer Operation | Jonathan Jove | Final |
+| [CAP-0015](cap-0015.md) | 13 | Fee Bump Transactions | OrbitLens | Final |
+| [CAP-0017](cap-0017.md) | - | Update LastModifiedLedgerSeq If and Only If LedgerEntry is Modified | Jonathan Jove | Accepted |
+| [CAP-0018](cap-0018.md) | 13 | Fine-Grained Control of Authorization | Jonathan Jove | Final |
+| [CAP-0019](cap-0019.md) | 13 | Future-upgradable TransactionEnvelope type | David Mazières | Final |
+| [CAP-0020](cap-0020.md) | 11 | Bucket Initial Entries | Graydon Hoare | Final |
+| [CAP-0021](cap-0021.md) | 19 | Generalized transaction preconditions | David Mazières | Final |
+| [CAP-0023](cap-0023.md) | 14 | Two-Part Payments with ClaimableBalanceEntry | Jonathan Jove | Final |
+| [CAP-0024](cap-0024.md) | 12 | Make PathPayment Symmetrical | Jed McCaleb | Final |
+| [CAP-0025](cap-0025.md) | 12 | Remove Bucket Shadowing | Marta Lokhava | Final |
+| [CAP-0026](cap-0026.md) | 12 | Disable Inflation Mechanism | OrbitLens | Final |
+| [CAP-0027](cap-0027.md) | 13 | First-class multiplexed accounts | David Mazières and Tomer Weller | Final |
+| [CAP-0028](cap-0028.md) | 13 | Clear pre-auth transaction signer on failed transactions | Siddharth Suresh | Final |
+| [CAP-0029](cap-0029.md) | 16 | AllowTrust when not AUTH_REQUIRED | Tomer Weller | Final |
+| [CAP-0030](cap-0030.md) | 13 | Remove NO_ISSUER Operation Results | Siddharth Suresh | Final |
+| [CAP-0033](cap-0033.md) | 14/15 | Sponsored Reserve with EphemeralSponsorshipEntry | Jonathan Jove | Final |
+| [CAP-0034](cap-0034.md) | 14 | Preserve Transaction-Set/Close-Time Affinity During Nomination | Terence Rokop | Final |
+| [CAP-0035](cap-0035.md) | 17 | Asset Clawback | Dan Doney | Final |
+| [CAP-0038](cap-0038.md) | 18 | Automated Market Makers | Jonathan Jove | Final |
+| [CAP-0040](cap-0040.md) | 19 | Ed25519 Signed Payload Signer for Transaction Signature Disclosure | Leigh McCulloch | Final |
+| [CAP-0042](cap-0042.md) | - | Multi-Part Transaction Sets | Nicolas Barry | Final |
+| [CAP-0046](cap-0046.md) | 20 | Soroban smart contract system overview | Graydon Hoare | Final |
+| [CAP-0046-01 (formerly 0046)](cap-0046-01.md) | 20 | WebAssembly Smart Contract Runtime Environment | Graydon Hoare | Final |
+| [CAP-0046-02 (formerly 0047)](cap-0046-02.md) | 20 | Smart Contract Lifecycle | Siddharth Suresh | Final |
+| [CAP-0046-03 (formerly 0051)](cap-0046-03.md) | 20 | Smart Contract Host Functions | Jay Geng | Final |
+| [CAP-0046-05 (formerly 0053)](cap-0046-05.md) | 20 | Smart Contract Data | Graydon Hoare | Final |
+| [CAP-0046-06 (formerly 0054)](cap-0046-06.md) | 20 | Smart Contract Standardized Asset | Jonathan Jove | Final |
+| [CAP-0046-07 (formerly 0055)](cap-0046-07.md) | 20 | Fee model in smart contracts | Nicolas Barry | Final |
+| [CAP-0046-08 (formerly 0056)](cap-0046-08.md) | 20 | Smart Contract Logging | Siddharth Suresh | Final |
+| [CAP-0046-09](cap-0046-09.md) | 20 | Network Configuration Ledger Entries | Dmytro Kozhevin | Final |
+| [CAP-0046-10](cap-0046-10.md) | 20 | Smart Contract Budget Metering | Jay Geng | Final |
+| [CAP-0046-11](cap-0046-11.md) | 20 | Soroban Authorization Framework | Dmytro Kozhevin | Final |
+| [CAP-0046-12](cap-0046-12.md) | 20 | Soroban State Archival Interface | Garand Tyson | Final |
+| [CAP-0051](cap-0051.md) | 21 | Smart Contract Host Functionality: Secp256r1 Verification | Leigh McCulloch | Final |
+| [CAP-0053](cap-0053.md) | 21 | Separate host functions to extend the TTL for contract instance and contract code | Tommaso De Ponti | Final |
+| [CAP-0054](cap-0054.md) | 21 | Soroban refined VM instantiation cost model | Graydon Hoare | Final |
+| [CAP-0055](cap-0055.md) | 21 | Soroban streamlined linking | Graydon Hoare | Final |
+| [CAP-0056](cap-0056.md) | 21 | Soroban intra-transaction module caching | Graydon Hoare | Final |
+| [CAP-0058](cap-0058.md) | 22 | Constructors for Soroban Contracts | Dmytro Kozhevin | Final |
+| [CAP-0059](cap-0059.md) | 22 | Host functions for BLS12-381 | Jay Geng | Final |
+| [CAP-0062](cap-0062.md) | 23 | Soroban Live State Prioritization | Garand Tyson | Final |
+| [CAP-0063](cap-0063.md) | 23 | Parallelism-friendly Transaction Scheduling | Dmytro Kozhevin | Final |
+| [CAP-0065](cap-0065.md) | 23 | Reusable Module Cache | Graydon Hoare | Final |
+| [CAP-0066](cap-0066.md) | 23 | Soroban In-memory Read Resource | Garand Tyson | Final |
+| [CAP-0067](cap-0067.md) | 23 | Unified Asset Events | Siddharth Suresh | Final |
+| [CAP-0068](cap-0068.md) | 23 | Host function for getting executable for `Address` | Dmytro Kozhevin | Final |
+| [CAP-0069](cap-0069.md) | 23 | String/Bytes conversion host functions | Dmytro Kozhevin | Final |
+| [CAP-0070](cap-0070.md) | 23 | Configurable SCP Timing Parameters | Garand Tyson | Final |
+| [CAP-0074](cap-0074.md) | - | Host functions for BN254 | Siddharth Suresh | Awaiting Decision |
+| [CAP-0075](cap-0075.md) | - | Cryptographic Primitives for Poseidon/Poseidon2 Hash Functions | Jay Geng | Awaiting Decision |
+| [CAP-0076](cap-0076.md) | 23 | P23 State Archival bug remediation | Dmytro Kozhevin | Final |
### Draft Proposals
-| Number | Title | Author | Status |
-| --- | --- | --- | --- |
-| [CAP-0007](cap-0007.md) | Deterministic Account Creation | Jeremy Rubin | Draft |
-| [CAP-0008](cap-0008.md) | Self Identified Pre-Auth Transaction | Jeremy Rubin | Draft |
-| [CAP-0009](cap-0009.md) | Linear/Exterior Immutable Accounts | Jeremy Rubin | Draft |
-| [CAP-0010](cap-0010.md) | Fee Bump Account | Jeremy Rubin | Draft |
-| [CAP-0011](cap-0011.md) | Relative Account Freeze | Jeremy Rubin | Draft |
-| [CAP-0012](cap-0012.md) | Deterministic accounts and creatorTxID | David Mazières | Draft |
-| [CAP-0014](cap-0014.md) | Adversarial Transaction Set Ordering | Jeremy Rubin | Draft |
-| [CAP-0022](cap-0022.md) | Invalid transactions must have no effects | David Mazières | Draft |
-| [CAP-0032](cap-0032.md) | Trustline Preauthorization | Jonathan Jove | Draft |
-| [CAP-0037](cap-0037.md) | Automated Market Makers | OrbitLens | Draft |
-| [CAP-0041](cap-0041.md) | Concurrent Transactions | Leigh McCulloch, David Mazières | Draft |
-| [CAP-0043](cap-0043.md) | ECDSA Signers with P-256 and secp256k1 Curves | Leigh McCulloch | Draft |
-| [CAP-0044](cap-0044.md) | SPEEDEX - Configuration | Jonathan Jove | Draft |
-| [CAP-0045](cap-0045.md) | SPEEDEX - Pricing | Jonathan Jove | Draft |
-| [CAP-0057](cap-0057.md) | State Archival Persistent Entry Eviction | Garand Tyson | Draft |
-| [CAP-0060](cap-0060.md) | Update to Wasmi register machine| Graydon Hoare | Accepted |
-| [CAP-0071](cap-0071.md) | Authentication delegation for custom accounts | Dmytro Kozhevin | Draft |
-| [CAP-0072](cap-0072.md) | Contract signers for Stellar accounts | Dmytro Kozhevin | Draft |
-| [CAP-0073](cap-0073.md) | Allow SAC to create G-account balances | Dmytro Kozhevin | Draft |
-| [CAP-0077](cap-0077.md) | Ability to freeze ledger keys via network configuration | Dmytro Kozhevin | Draft |
-| [CAP-0078](cap-0078.md) | Host functions for performing limited TTL extensions | Dmytro Kozhevin | Draft |
-| [CAP-0079](cap-0079.md) | Host functions for muxed address strkey conversions | Dmytro Kozhevin | Draft |
-| [CAP-0080](cap-0080.md) | Host functions for efficient ZK BN254 use cases | Siddharth Suresh | Draft |
+
+| Number | Title | Author | Status |
+| ----------------------- | ------------------------------------------------------- | ------------------------------- | -------- |
+| [CAP-0007](cap-0007.md) | Deterministic Account Creation | Jeremy Rubin | Draft |
+| [CAP-0008](cap-0008.md) | Self Identified Pre-Auth Transaction | Jeremy Rubin | Draft |
+| [CAP-0009](cap-0009.md) | Linear/Exterior Immutable Accounts | Jeremy Rubin | Draft |
+| [CAP-0010](cap-0010.md) | Fee Bump Account | Jeremy Rubin | Draft |
+| [CAP-0011](cap-0011.md) | Relative Account Freeze | Jeremy Rubin | Draft |
+| [CAP-0012](cap-0012.md) | Deterministic accounts and creatorTxID | David Mazières | Draft |
+| [CAP-0014](cap-0014.md) | Adversarial Transaction Set Ordering | Jeremy Rubin | Draft |
+| [CAP-0022](cap-0022.md) | Invalid transactions must have no effects | David Mazières | Draft |
+| [CAP-0032](cap-0032.md) | Trustline Preauthorization | Jonathan Jove | Draft |
+| [CAP-0037](cap-0037.md) | Automated Market Makers | OrbitLens | Draft |
+| [CAP-0041](cap-0041.md) | Concurrent Transactions | Leigh McCulloch, David Mazières | Draft |
+| [CAP-0043](cap-0043.md) | ECDSA Signers with P-256 and secp256k1 Curves | Leigh McCulloch | Draft |
+| [CAP-0044](cap-0044.md) | SPEEDEX - Configuration | Jonathan Jove | Draft |
+| [CAP-0045](cap-0045.md) | SPEEDEX - Pricing | Jonathan Jove | Draft |
+| [CAP-0057](cap-0057.md) | State Archival Persistent Entry Eviction | Garand Tyson | Draft |
+| [CAP-0060](cap-0060.md) | Update to Wasmi register machine | Graydon Hoare | Accepted |
+| [CAP-0071](cap-0071.md) | Authentication delegation for custom accounts | Dmytro Kozhevin | Draft |
+| [CAP-0072](cap-0072.md) | Contract signers for Stellar accounts | Dmytro Kozhevin | Draft |
+| [CAP-0073](cap-0073.md) | Allow SAC to create G-account balances | Dmytro Kozhevin | Draft |
+| [CAP-0077](cap-0077.md) | Ability to freeze ledger keys via network configuration | Dmytro Kozhevin | Draft |
+| [CAP-0078](cap-0078.md) | Host functions for performing limited TTL extensions | Dmytro Kozhevin | Draft |
+| [CAP-0079](cap-0079.md) | Host functions for muxed address strkey conversions | Dmytro Kozhevin | Draft |
+| [CAP-0080](cap-0080.md) | Host functions for efficient ZK BN254 use cases | Siddharth Suresh | Draft |
### Rejected Proposals
-| Number | Title | Author | Status |
-| --- | --- | --- | --- |
-| [CAP-0013](cap-0013.md) | Change Trustlines to Balances | Dan Robinson | Rejected |
-| [CAP-0016](cap-0016.md) | Cosigned assets: NopOp and COAUTHORIZED_FLAG | David Mazières | Rejected |
-| [CAP-0031](cap-0031.md) | Sponsored Reserve | Jonathan Jove | Rejected |
-| [CAP-0036](cap-0036.md) | Claimable Balance Clawback | Leigh McCulloch | Rejected |
-| [CAP-0039](cap-0039.md) | Not Auth Revocable Trustlines | Leigh McCulloch | Rejected |
-| [CAP-0048](cap-0048.md) | Smart Contract Asset Interoperability | Jonathan Jove | Rejected |
-| [CAP-0049](cap-0049.md) | Smart Contract Asset Interoperability with Wrapper | Jonathan Jove | Rejected |
-| [CAP-0050](cap-0050.md) | Smart Contract Interactions | Jonathan Jove | Rejected |
-| [CAP-0052](cap-0052.md) | Smart Contract Host Functionality: Base64 Encoding/Decoding | Leigh McCulloch | Rejected |
-| [CAP-0064](cap-0064.md) | Memo Authorization for Soroban | Dmytro Kozhevin | Rejected |
-| [CAP-0061](cap-0061.md) | Smart Contract Standardized Asset (Stellar Asset Contract) Extension: Memo | Tomer Weller | Rejected |
-# Contribution Process
+| Number | Title | Author | Status |
+| ----------------------- | -------------------------------------------------------------------------- | --------------- | -------- |
+| [CAP-0013](cap-0013.md) | Change Trustlines to Balances | Dan Robinson | Rejected |
+| [CAP-0016](cap-0016.md) | Cosigned assets: NopOp and COAUTHORIZED_FLAG | David Mazières | Rejected |
+| [CAP-0031](cap-0031.md) | Sponsored Reserve | Jonathan Jove | Rejected |
+| [CAP-0036](cap-0036.md) | Claimable Balance Clawback | Leigh McCulloch | Rejected |
+| [CAP-0039](cap-0039.md) | Not Auth Revocable Trustlines | Leigh McCulloch | Rejected |
+| [CAP-0048](cap-0048.md) | Smart Contract Asset Interoperability | Jonathan Jove | Rejected |
+| [CAP-0049](cap-0049.md) | Smart Contract Asset Interoperability with Wrapper | Jonathan Jove | Rejected |
+| [CAP-0050](cap-0050.md) | Smart Contract Interactions | Jonathan Jove | Rejected |
+| [CAP-0052](cap-0052.md) | Smart Contract Host Functionality: Base64 Encoding/Decoding | Leigh McCulloch | Rejected |
+| [CAP-0064](cap-0064.md) | Memo Authorization for Soroban | Dmytro Kozhevin | Rejected |
+| [CAP-0061](cap-0061.md) | Smart Contract Standardized Asset (Stellar Asset Contract) Extension: Memo | Tomer Weller | Rejected |
-The Stellar Protocol, like most software in the world, continues to evolve over time to meet the
-needs of our network's participants and to drive technology forward into new territory. Given the
-importance of the reliability and safety of the network, we ask that all of those who have ideas
-towards pushing Stellar's protocol development forward adhere to the following:
+# Contribution Process
-- Consider your idea and how it serves the fundamental goals of the Stellar Network and aligns with
- values of the Stellar Protocol (which are listed below). If you cannot show how your proposal
- aligns with those goals and values, it's unlikely to ever be implemented.
-- Gather feedback from discussion on the dev mailing list and other forums, and utilize it to begin
- a draft proposal, otherwise known as a CAP (Core Advancement Proposal).
+The Stellar protocol, like most software in the world, continues to evolve over
+time to meet the needs of our network's participants and to drive technology
+forward into new territory. Given the importance of the reliability and safety
+of the network, we ask that all of those who have ideas towards pushing
+Stellar's protocol development forward adhere to the following:
+
+- Consider your idea and how it serves the fundamental goals of the Stellar
+ network and aligns with values of the Stellar protocol (which are listed
+ below). If you cannot show how your proposal aligns with those goals and
+ values, it's unlikely to ever be implemented.
+- Gather feedback from discussion on the dev mailing list and other forums, and
+ utilize it to begin a draft proposal, otherwise known as a CAP (Core
+ Advancement Proposal).
- Follow the proposal process listed below.
## Stellar Network Goals
-- **The Stellar Network should be secure and reliable, and should bias towards safety, simplicity,
- reliability, and performance over new functionality.**
-- **The Stellar Network should run at scale and at low cost to all participants of the network.**
- * In support of this, the Stellar Network should support off-chain transactions, e.g. Starlight.
- * An explicit non-goal is limiting the hardware requirements of stellar-core to a personal
- computer.
-- **The Stellar Network should facilitate simplicity and interoperability with other protocols and
- networks.**
- * In support of this, the Stellar Network should facilitate side-chain transactions to enable
- sub-networks.
-- **The Stellar Network should enable cross-border payments, i.e. payments via exchange of assets,
- throughout the globe, enabling users to make payments between assets in a manner that is fast,
- cheap, and highly usable.**
- - In support of this, the Stellar Network should support an orderbook that values simplicity
- over functionality, and one that primarily serves to enable cross-border payments.
- - In support of this, the Stellar Network should facilitate liquidity as a means to enabling
- - cross-border payments.
- - In support of this, the Stellar Network should enable asset issuance, but as a means of
- - enabling cross-border payments.
-- **The Stellar Network should support decentralization wherever possible, but not at the expense
- of the majority of its values.**
- - There should be no privileged actors — we should support egalitarianism and everyone
- participating on the same playing field.
-- **The Stellar Network should enable users to easily exchange their non-Stellar based assets to
- Stellar-based assets, and vice versa.**
-- **The Stellar Network should make it easy for developers of Stellar projects to create highly
- usable products.**
+
+- **The Stellar network should be secure and reliable, and should bias towards
+ safety, simplicity, reliability, and performance over new functionality.**
+- **The Stellar network should run at scale and at low cost to all participants
+ of the network.**
+ - In support of this, the Stellar network should support off-chain
+ transactions, e.g. Starlight.
+ - An explicit non-goal is limiting the hardware requirements of stellar-core
+ to a personal computer.
+- **The Stellar network should facilitate simplicity and interoperability with
+ other protocols and networks.**
+ - In support of this, the Stellar network should facilitate side-chain
+ transactions to enable sub-networks.
+- **The Stellar network should enable cross-border payments, i.e. payments via
+ exchange of assets, throughout the globe, enabling users to make payments
+ between assets in a manner that is fast, cheap, and highly usable.**
+ - In support of this, the Stellar network should support an orderbook that
+ values simplicity over functionality, and one that primarily serves to
+ enable cross-border payments.
+  - In support of this, the Stellar network should facilitate liquidity as a
+    means to enabling cross-border payments.
+  - In support of this, the Stellar network should enable asset issuance, but
+    as a means of enabling cross-border payments.
+- **The Stellar network should support decentralization wherever possible, but
+ not at the expense of the majority of its values.**
+ - There should be no privileged actors — we should support egalitarianism and
+ everyone participating on the same playing field.
+- **The Stellar network should enable users to easily exchange their
+ non-Stellar based assets to Stellar-based assets, and vice versa.**
+- **The Stellar network should make it easy for developers of Stellar projects
+ to create highly usable products.**
## Stellar Protocol Development Values
-- **The Stellar Protocol should serve the goals of the Stellar Network.**
-- **The Stellar Protocol should bias towards simplicity.**
- - When possible, solutions should be considered outside of core protocol changes such as via
- [SEPs (Stellar Ecosystem Proposals)](../ecosystem/README.md) to minimize complexity in the
- Stellar protocol.
- - When possible, proposals should minimize the impact of changes to the smallest surface area and
- shallowest depth (i.e. sticking to the higher levels of the software) of the protocol
- architecture possible to make changes predictable and easier to test and reason about. Changes
- should be surgical, and minimal invasive. As a result, changes that affect lower levels of the
- implementation have a higher bar for acceptance.
- - In order from the lowest level to the highest level systems, the systems are:
+
+- **The Stellar protocol should serve the goals of the Stellar network.**
+- **The Stellar protocol should bias towards simplicity.**
+ - When possible, solutions should be considered outside of core protocol
+ changes such as via
+ [SEPs (Stellar Ecosystem Proposals)](../ecosystem/README.md) to minimize
+ complexity in the Stellar protocol.
+ - When possible, proposals should minimize the impact of changes to the
+ smallest surface area and shallowest depth (i.e. sticking to the higher
+ levels of the software) of the protocol architecture possible to make
+ changes predictable and easier to test and reason about. Changes should be
+    surgical and minimally invasive. As a result, changes that affect lower
+ levels of the implementation have a higher bar for acceptance.
+ - In order from the lowest level to the highest level systems, the systems
+ are:
- Historical / Ledger XDR
- Observable Transaction Semantics
- Consensus XDR
@@ -191,123 +214,149 @@ towards pushing Stellar's protocol development forward adhere to the following:
- Unobservable tx semantics (eg. performance or bug fixes)
- Horizon semantics
- Public APIs, Client Libraries/SDKs.
-- **The Stellar Protocol should be clear, concise, and opinionated.**
- - New operations and functionality should be opinionated, and straightforward to use.
+- **The Stellar protocol should be clear, concise, and opinionated.**
+ - New operations and functionality should be opinionated, and straightforward
+ to use.
- There should ideally be only one obvious way to accomplish a given task.
-- **The Stellar Protocol should bias towards broad use cases, and bias against niche
- functionality.**
-- **The Stellar Protocol should bias towards user safety.**
+- **The Stellar protocol should bias towards broad use cases, and bias against
+ niche functionality.**
+- **The Stellar protocol should bias towards user safety.**
## CAP Process
-These are the steps from [idea to deployment](https://www.youtube.com/watch?v=Otbml6WIQPo) on how
-to create a Core Advancement Proposal (CAP).
+
+These are the steps from
+[idea to deployment](https://www.youtube.com/watch?v=Otbml6WIQPo) on how to
+create a Core Advancement Proposal (CAP).
### Pre-CAP (Initial Discussion)
-Introduce your idea on the [stellar-dev mailing list](https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!forum/stellar-dev) or [GitHub Discussions].
+
+Introduce your idea on the
+[stellar-dev mailing list](https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!forum/stellar-dev)
+or [GitHub Discussions].
[GitHub Discussions]: https://github.com/stellar/stellar-protocol/discussions
-- Make sure to gather feedback and alternative ideas — it's useful before putting together a
- formal draft!
-- Consider contacting experts in a particular area for feedback while you're hashing out the
- details.
+- Make sure to gather feedback and alternative ideas — it's useful before
+ putting together a formal draft!
+- Consider contacting experts in a particular area for feedback while you're
+ hashing out the details.
### Creating a CAP Draft
-Draft a formal proposal using the [CAP Template](../cap-template.md), and submit a PR to this
-repository. You should make sure to adhere to the following:
+
+Draft a formal proposal using the [CAP Template](../cap-template.md), and
+submit a PR to this repository. You should make sure to adhere to the
+following:
- Make sure to place the draft in the `core/` folder.
- Your CAP should be named `cap-TBD.md`
-- If your CAP requires images or other supporting files, they should be included in a sub-directory
- of the `contents` folder for that CAP, such as `contents/cap-TBD/`. Links
- should be relative, for example a link to an image from your CAP would be
- `../contents/cap-TBD/image.png`.
+- If your CAP requires images or other supporting files, they should be
+ included in a sub-directory of the `contents` folder for that CAP, such as
+ `contents/cap-TBD/`. Links should be relative, for example a link to an image
+ from your CAP would be `../contents/cap-TBD/image.png`.
Finally, submit a PR of your draft via your fork of this repository.
#### Additional Tips
-- Use `TBD` for the protocol version. Don't assign a protocol version to the CAP — this will be
- established once the CAP has reached the state of *Final* and has been formally implemented.
+
+- Use `TBD` for the protocol version. Don't assign a protocol version to the
+ CAP — this will be established once the CAP has reached the state of _Final_
+ and has been formally implemented.
### Draft: Merging & Further Iteration
+
From there, the following process will happen.
#### CAP gets merged
+
If you properly followed the steps above, your PR will get merged.
-The CAP and associated files will get renamed based on the latest
-CAP draft number before merging.
+The CAP and associated files will get renamed based on the latest CAP draft
+number before merging.
#### Assembling a working group
-As your idea gets traction, you'll need to assemble a working group as
-to increase the chances of success that this CAP proceeds through the stages.
+As your idea gets traction, you'll need to assemble a working group so as to
+increase the chances of success that this CAP proceeds through the stages.
-For more information on this, review the [working group section](../cap-template.md#working-group) of the CAP template.
+For more information on this, review the
+[working group section](../cap-template.md#working-group) of the CAP template.
#### Iterating on the CAP
-You should continue the discussion of the draft CAP on the mailing list
-with an attempt at reaching consensus.
+You should continue the discussion of the draft CAP on the mailing list with an
+attempt at reaching consensus.
When opening PRs to modify the draft:
-- changes have to either be submitted by one of the authors (Recommender or Owner) or
-signed off by the authors
-- avoid discussions in the PR itself as it makes it more difficult for future contributors to understand the rational for changes.
+
+- changes have to either be submitted by one of the authors (Recommender or
+ Owner) or signed off by the authors
+- avoid discussions in the PR itself as it makes it more difficult for future
+  contributors to understand the rationale for changes.
- best is to always discuss in the mailing list.
- - alternatively, a recap of the discussion that happened in the PR could be posted in the mailing list (but it's easy to forget to do this).
+ - alternatively, a recap of the discussion that happened in the PR could be
+ posted in the mailing list (but it's easy to forget to do this).
### Draft -> Awaiting Decision
-When your CAP receives sufficient feedback from the community,
-you'll need to present it to a subset of the CAP Core Team for review.
+When your CAP receives sufficient feedback from the community, you'll need to
+present it to a subset of the CAP Core Team for review.
-For that, when you're ready, you should submit a PR changing the status
-in the draft to `Awaiting Decision`.
+For that, when you're ready, you should submit a PR changing the status in the
+draft to `Awaiting Decision`.
-The CAP will be scheduled to be discussed at a protocol meeting.
-As the owner of the CAP, you will be invited to share your CAP
-and participate in discussion during the meeting.
+The CAP will be scheduled to be discussed at a protocol meeting. As the owner
+of the CAP, you will be invited to share your CAP and participate in discussion
+during the meeting.
You may invite any other members of your working group.
The protocol meetings will be used to decide on next step:
- - If the CAP has received support and general consensus, it is moved to `Awaiting Decision` ;
- - If the CAP requires some adjustments or needs to receive more feedback from the community, the meeting is adjourned ;
- - If for any reason the CAP gets abandoned, it gets a status of `Rejected`.
+
+- If the CAP has received support and general consensus, it is moved to
+  `Awaiting Decision`;
+- If the CAP requires some adjustments or needs to receive more feedback from
+  the community, the meeting is adjourned;
+- If for any reason the CAP gets abandoned, it gets a status of `Rejected`.
### Awaiting Decision -> Final Comment Period (FCP)
+
- A vote will take place among the CAP Core Team.
- - A unanimous approval from the CAP Core Team will put the CAP in a `FCP: Accepted` status.
- - Otherwise, the CAP will be given feedback and head towards a `FCP: Rejected` status (if the
- majority of the CAP raises concerns) or a `Draft` status (if only a minority of the CAP
- raises concerns).
+ - A unanimous approval from the CAP Core Team will put the CAP in a
+ `FCP: Accepted` status.
+  - Otherwise, the CAP will be given feedback and head towards a
+    `FCP: Rejected` status (if the majority of the CAP Core Team raises
+    concerns) or a `Draft` status (if only a minority of the CAP Core Team
+    raises concerns).
- It can take upwards of 3 meetings before a disposition is reached.
### FCP -> Accepted/Rejected
-- After a week of an Final Comment Period (FCP) where any major concerns that have not been
- previously addressed can be brought up, the CAP will head to its final disposition.
- - Concerns will be addressed on a case by case basis, and only major concerns that were not
- addressed earlier will move the CAP back to a `Draft` state.
+
+- After a week of a Final Comment Period (FCP) where any major concerns that
+ have not been previously addressed can be brought up, the CAP will head to
+ its final disposition.
+ - Concerns will be addressed on a case by case basis, and only major concerns
+ that were not addressed earlier will move the CAP back to a `Draft` state.
### CAP Implementation
-SDF will prioritize accepted CAPs among its priorities for a given year. However, if you want to
-ensure your CAP is implemented in a timely manner, it is likely best for you to attempt to
-implement it yourself.
+SDF will prioritize accepted CAPs among its priorities for a given year.
+However, if you want to ensure your CAP is implemented in a timely manner, it
+is likely best for you to attempt to implement it yourself.
-Once a CAP is implemented, a PR should be submitted to update its status to **Implementation
-Review**, along with the protocol version it was released in if applicable.
+Once a CAP is implemented, a PR should be submitted to update its status to
+**Implementation Review**, along with the protocol version it was released in
+if applicable.
-From here the proposal is brought up again before the protocol group for additional comment, where
-it is possible that the proposal is rejected based on the issues that arise from its
-implementation. If no issues arise, it will move to **Implemented** by a CAP team member.
+From here the proposal is brought up again before the protocol group for
+additional comment, where it is possible that the proposal is rejected based on
+the issues that arise from its implementation. If no issues arise, it will move
+to **Implemented** by a CAP team member.
### CAP Finalization
-Once an implemented CAP has been released in a specified version, the CAP should be updated with
-the protocol version that the implementation targets. From there, once a majority of validators on
-the network have accepted the implementation, it will move to **Final**.
+Once an implemented CAP has been released in a specified version, the CAP
+should be updated with the protocol version that the implementation targets.
+From there, once a majority of validators on the network have accepted the
+implementation, it will move to **Final**.
## CAP Team Members
diff --git a/core/cap-0001.md b/core/cap-0001.md
index d2cda7759..a026cce38 100644
--- a/core/cap-0001.md
+++ b/core/cap-0001.md
@@ -11,41 +11,44 @@ Protocol version: 10
```
## Simple Summary
-The Bump Sequence operation allows you to bump forward the sequence number of the source
-account of the operation.
+
+The Bump Sequence operation allows you to bump forward the sequence number of
+the source account of the operation.
If the specified bumpTo sequence number is greater than the source account's
sequence number, the account's sequence number is updated with that value,
otherwise it's not modified.
## Abstract
-The Bump sequence operation allows you to bump forward the sequence number of the source account
-of the operation.
+
+The Bump sequence operation allows you to bump forward the sequence number of
+the source account of the operation.
## Motivation
+
Addresses a need when dealing with a large number of pre-signed transactions
-where there is a need to invalidate an execution branch.
-The problem is solved by allowing the branches of pre-signed transactions
-to have non-overlapping sequence numbers and creating a new operation to
-change the sequence number of the account to an arbitrary number.
+where there is a need to invalidate an execution branch. The problem is solved
+by allowing the branches of pre-signed transactions to have non-overlapping
+sequence numbers and creating a new operation to change the sequence number of
+the account to an arbitrary number.
## Specification
-The Bump Sequence operation allows you to bump forward the sequence number of the source
-account of the operation.
+The Bump Sequence operation allows you to bump forward the sequence number of
+the source account of the operation.
If the specified `bumpTo` sequence number is greater than the source account's
- sequence number, the account's sequence number is updated with that value,
+sequence number, the account's sequence number is updated with that value,
otherwise it's not modified.
Threshold is "Low", in line with the weight required by a signer for the source
account to update the sequence number for all transactions.
-Note:
-This operation only allows bumping the sequence number up to
+Note: This operation only allows bumping the sequence number up to
`(current_ledger_number<<32) - 1`.
`BumpSequenceOp` specification:
+
```c++
struct BumpSequenceOp
{
@@ -72,6 +75,7 @@ default:
```
New error code for `AccountMerge`:
+
```c++
/******* AccountMerge Result ********/
@@ -89,6 +93,7 @@ enum AccountMergeResultCode
```
New error code at the operation level:
+
```c++
enum OperationResultCode
{
@@ -100,8 +105,9 @@ enum OperationResultCode
};
```
-Updated meta data format:
-Transactions now update the sequence number right before applying their operations.
+Updated meta data format: Transactions now update the sequence number right
+before applying their operations.
+
```c++
struct TransactionMetaV1
{
@@ -121,60 +127,64 @@ case 1:
```
Finally this change also switches `SequenceNumber` to be a signed integer:
+
```c++
typedef int64 SequenceNumber;
```
## Rationale
-`BumpSequenceOp` is a new operation, in order to properly reject the use of this
-new operation from nodes that understand the xdr but with the network still
-running an older protocol version, a new return value is added to operation
-result to communicate the failure. Note that this error code should never
-appear in results processed post consensus as operations failing that way
+`BumpSequenceOp` is a new operation, in order to properly reject the use of
+this new operation from nodes that understand the xdr but with the network
+still running an older protocol version, a new return value is added to
+operation result to communicate the failure. Note that this error code should
+never appear in results processed post consensus as operations failing that way
are deemed "invalid".
-In addition, to avoid sequence number reuse by re-creating an account that was merged,
-AccountMerge is modified to not allow merging the account if it would open the possibility
-of the account being re-created with a smaller sequence number.
+In addition, to avoid sequence number reuse by re-creating an account that was
+merged, AccountMerge is modified to not allow merging the account if it would
+open the possibility of the account being re-created with a smaller sequence
+number.
-The move to signed integers for `SequenceNumber` was driven by the same rationale
-than for why amounts are modeled as signed integers and the fact that `BumpSeqOp`
-makes it a lot likier to have sequence numbers in the high 64 bit range:
-many languages and systems (SQL) do not support unsigned integers properly which
-may lead to crashes or unexpected behaviors.
+The move to signed integers for `SequenceNumber` was driven by the same
+rationale as for why amounts are modeled as signed integers and the fact that
+`BumpSeqOp` makes it a lot likelier to have sequence numbers in the high 64 bit
+range: many languages and systems (SQL) do not support unsigned integers
+properly which may lead to crashes or unexpected behaviors.
### Considerations on how transaction sets are applied to ledgers
#### pre v10 implementation
Currently a transaction set is processed in two phases:
-* fees and sequence numbers are collected globally
-* transactions (and their operations) are applied
+
+- fees and sequence numbers are collected globally
+- transactions (and their operations) are applied
The problem with this approach is that with the introduction of an operation
-like `BumpSequenceOp`, we have some inconsistencies in the way transactions
- are processed:
+like `BumpSequenceOp`, we have some inconsistencies in the way transactions are
+processed:
if a transaction bumps the sequence number of an account used in a later
transaction, the second transaction is expected to fail but with the current
implementation, it won't (as sequence number checks are performed while
collecting fees).
-One the reasons that `BumpSequenceOp` is introduced is to invalidate ranges
-of transactions, and with the current behavior it would make it difficult to
+One of the reasons that `BumpSequenceOp` is introduced is to invalidate ranges of
+transactions, and with the current behavior it would make it difficult to
reason about the correctness of sequence of transactions.
#### Updated sequence number processing
Only process fees first:
-* fees are collected globally as they are now
-* transactions are applied, before applying individual transactions:
- * numbers are checked for validity
- * if the sequence number was valid, update the account's sequence number
-We would not change the logic to construct or validate a transaction set:
-a transaction set would still be built with transactions that have consecutive
+- fees are collected globally as they are now
+- transactions are applied, before applying individual transactions:
+ - numbers are checked for validity
+ - if the sequence number was valid, update the account's sequence number
+
+We would not change the logic to construct or validate a transaction set: a
+transaction set would still be built with transactions that have consecutive
sequence numbers.
The difference is that in the event that a transaction is invalidated by an
@@ -187,34 +197,34 @@ level changes when applying the transaction and its operations.
## Backwards Compatibility
-As the sequence number is updated as part of the regular processing of operations,
- it requires to emit transaction level meta data, which only existed as part of
-the fee processing (included in the `txfeehistory` table).
-The implementation will switch to using the new meta format for all transactions,
+As the sequence number is updated as part of the regular processing of
+operations, it requires to emit transaction level meta data, which only existed
+as part of the fee processing (included in the `txfeehistory` table). The
+implementation will switch to using the new meta format for all transactions,
including the ones for older versions of the protocol (which will not emit any
-transaction level meta data as expected).
-While this is not strictly necessary, it's safer as it makes it obvious that
-downstream systems (such as Horizon) need to be updated to support the
-new meta data format; the alternative would have been to change the meta data
-format with the protocol version, but as protocol version upgrades are not
-necessarily managed by the node operators it ends up being less safe.
-
-The shift to signed sequence numbers is backward compatible from a data
-point of view as the top 32 bits of sequence numbers are seeded with the
-legder sequence number in older versions of the protocol.
-Some SDKs may have to be slightly updated if they enforce strong typing.
-In addition, existing code even with the updated range will continue to work
-as sequence numbers are within a subset of the range that was supported
-before.
+transaction level meta data as expected). While this is not strictly necessary,
+it's safer as it makes it obvious that downstream systems (such as Horizon)
+need to be updated to support the new meta data format; the alternative would
+have been to change the meta data format with the protocol version, but as
+protocol version upgrades are not necessarily managed by the node operators it
+ends up being less safe.
+
+The shift to signed sequence numbers is backward compatible from a data point
+of view as the top 32 bits of sequence numbers are seeded with the ledger
+sequence number in older versions of the protocol. Some SDKs may have to be
+slightly updated if they enforce strong typing. In addition, existing code even
+with the updated range will continue to work as sequence numbers are within a
+subset of the range that was supported before.
## Test Cases
-* `BumpSequenceOp` rejected on older versions of the protocol
-* bumps to a small number passed the current sequence number
-* bumps to `MAX_INT64`, at which point the account cannot be used as transaction source account
-* bumps to a number smaller than the current sequence number (should no op)
-* bumps to a sequence number that is negative
-* don't allow merge when the account sequence number is too high
+- `BumpSequenceOp` rejected on older versions of the protocol
+- bumps to a small number past the current sequence number
+- bumps to `MAX_INT64`, at which point the account cannot be used as
+ transaction source account
+- bumps to a number smaller than the current sequence number (should no op)
+- bumps to a sequence number that is negative
+- don't allow merge when the account sequence number is too high
## Implementation
diff --git a/core/cap-0002.md b/core/cap-0002.md
index 992ead28a..59557e2b9 100644
--- a/core/cap-0002.md
+++ b/core/cap-0002.md
@@ -2,7 +2,7 @@
```
CAP: 0002
-Title: Transaction level signature verification
+Title: Transaction level signature verification
Author: Nicolas Barry, Rafal Malinowski
Status: Final
Created: 2018-05-03
@@ -11,54 +11,57 @@ Protocol Version: 10
```
## Simple Summary
-Make signature verification behave the same way pre and post consensus
- in order to make smart contracts easier to author.
+
+Make signature verification behave the same way pre and post consensus in order
+to make smart contracts easier to author.
## Abstract
-Perform signature verification (for each operation) before performing
-any of the transaction side effects.
+Perform signature verification (for each operation) before performing any of
+the transaction side effects.
-This may still allow certain transactions to invalidate future transactions
-in the same transaction set, while allowing operations in a transaction to
-safely manipulate signers.
+This may still allow certain transactions to invalidate future transactions in
+the same transaction set, while allowing operations in a transaction to safely
+manipulate signers.
## Motivation
Currently signatures are verified in two different ways:
-* when building a transaction set for consensus, verification is done without
-any of the operation side effects
-* when applying transactions post consensus, each operation checks again for
-signatures
+
+- when building a transaction set for consensus, verification is done without
+ any of the operation side effects
+- when applying transactions post consensus, each operation checks again for
+ signatures
The problem with this approach is that some operations may cause subsequent
operations in the same transaction to fail, making crafting pre-signed
-transactions that manipulate multiple signers or weights very complicated
-(or impossible) to implement in the context of multiple operations within
-a transaction.
+transactions that manipulate multiple signers or weights very complicated (or
+impossible) to implement in the context of multiple operations within a
+transaction.
## Specification
Transaction's `doApply` will be modified to perform in order:
+
1. transaction level changes
- 1. sequence number processing
- 2. signature verification
- 1. verify signatures for each operation
- 2. removal of one time signatures
-2. operations processing, for each operation:
- 1. validity checks (same as today except for the signature verification)
- 2. side effects
+ 1. sequence number processing
+ 2. signature verification
+ 1. verify signatures for each operation
+ 2. removal of one time signatures
+2. operations processing, for each operation:
+ 1. validity checks (same as today except for the signature verification)
+ 2. side effects
## Rationale
-This approach makes signature verification behave the same way than how sequence
-numbers are now processed (in v10 of the protocol as per cap-0001).
+This approach makes signature verification behave the same way as how
+sequence numbers are now processed (in v10 of the protocol as per cap-0001).
Removal of one time signatures are done first in order to avoid introducing a
new concept of "post transaction side effects" that are not currently modeled
in the meta data.
-Note that we *could* add this notion to protocol 10 as cap-001 has not been put
+Note that we _could_ add this notion to protocol 10 as cap-001 has not been put
in production yet but the added complexity doesn't seem to be necessary.
## Backwards Compatibility
@@ -68,9 +71,11 @@ This change makes certain transaction succeed that are now failing.
There are probably no good use cases for those failures, so it is deemed safe.
## Test Cases
+
Transactions that should now succeed:
### change thresholds twice
+
```json
// transaction that changes A thresholds twice
@@ -92,6 +97,7 @@ envelope:
Failing right now with `opBAD_AUTH` for second operation.
### lower master weight twice
+
```json
// A with weigths=[10,1,5,10]
@@ -114,6 +120,7 @@ envelope:
Failing right now with `opBAD_AUTH` for second operation.
### remove signer and do something
+
```json
// A with signers: {key: B, weight: 1}; weigths=[1,2,2,2]
envelope:
@@ -136,4 +143,5 @@ Failing right now with `opBAD_AUTH` for second operation.
## Implementation
-Commit [c9087d0cc4c0710c9129d473fcd22887c60e4653](https://github.com/stellar/stellar-core/commit/c9087d0cc4c0710c9129d473fcd22887c60e4653)
+Commit
+[c9087d0cc4c0710c9129d473fcd22887c60e4653](https://github.com/stellar/stellar-core/commit/c9087d0cc4c0710c9129d473fcd22887c60e4653)
diff --git a/core/cap-0003.md b/core/cap-0003.md
index d47352101..c1a62fcf8 100644
--- a/core/cap-0003.md
+++ b/core/cap-0003.md
@@ -11,25 +11,63 @@ Protocol version: 10
```
## Simple Summary
-Asset-backed offers is a proposal to resolve the issue that offers in the ledger might not be executable.
+
+Asset-backed offers is a proposal to resolve the issue that offers in the
+ledger might not be executable.
## Abstract
-Asset-backed offers is a proposal to resolve the issue that a single account might have liabilities, in the form of offers on the ledger, that exceeds the assets of the account. When this occurs offers in the ledger might not be executable, in the sense that there exists no crossing offer such that the entire amount of the offer is exchanged. Offers in the ledger that are not executable provide a false sense of liquidity.
-## Motivation
-We will say that an offer is "immediately executable in full" (IEIF in short) if, were it crossed by a hypothetical offer with no limits on amount bought or sold, the entire amount of selling asset would be exchanged. It is desirable that all offers are IEIF since this enables users to easily evaluate the amount of available liquidity.
+Asset-backed offers is a proposal to resolve the issue that a single account
+might have liabilities, in the form of offers on the ledger, that exceeds the
+assets of the account. When this occurs offers in the ledger might not be
+executable, in the sense that there exists no crossing offer such that the
+entire amount of the offer is exchanged. Offers in the ledger that are not
+executable provide a false sense of liquidity.
-The protocol requires that, upon creation, every offer satisfies the following two conditions:
+## Motivation
-* The amount offered to sell does not exceed the available balance of the selling asset
-* The amount offered to buy (computed implicitly) does not exceed the available limit of the buying asset
+We will say that an offer is "immediately executable in full" (IEIF in short)
+if, were it crossed by a hypothetical offer with no limits on amount bought or
+sold, the entire amount of selling asset would be exchanged. It is desirable
+that all offers are IEIF since this enables users to easily evaluate the amount
+of available liquidity.
+
+The protocol requires that, upon creation, every offer satisfies the following
+two conditions:
+
+- The amount offered to sell does not exceed the available balance of the
+ selling asset
+- The amount offered to buy (computed implicitly) does not exceed the available
+ limit of the buying asset
+
+We will now demonstrate that these conditions are not sufficient to ensure that
+an offer is IEIF. Suppose that an account creates an offer which is IEIF with
+the selling amount equal to the available balance of the selling asset. The
+account then creates a second offer with a worse price but otherwise identical
+to the first. Although each offer individually satisfies the above
+requirements, the second offer is not IEIF. For suppose that the second offer
+was crossed by a hypothetical offer with no limits. This crossing offer would
+initially cross the first offer, which leaves no available balance for the
+selling asset. When the second offer is then crossed, no assets can be
+exchanged.
+
+Analogous to the above example, it is possible to create multiple offers that
+individually meet the requirements but exceed the available limit when
+considered in aggregate. Suppose that an account creates an offer which is IEIF
+with the buying amount equal to the available limit of the buying asset. The
+account then creates a second offer with a worse price but otherwise identical
+to the first. Although each offer individually satisfies the above
+requirements, the second offer is not IEIF. For suppose that the second offer
+was crossed by a hypothetical offer with no limits. This crossing offer would
+initially cross the first offer, which leaves no available limit for the buying
+asset. When the second offer is then crossed, no assets can be exchanged.
-We will now demonstrate that these conditions are not sufficient to ensure that an offer is IEIF. Suppose that an account creates an offer which is IEIF with the selling amount equal to the available balance of the selling asset. The account then creates a second offer with a worse price but otherwise identical to the first. Although each offer individually satisfies the above requirements, the second offer is not IEIF. For suppose that the second offer was crossed by a hypothetical offer with no limits. This crossing offer would initially cross the first offer, which leaves no available balance for the selling asset. When the second offer is then crossed, no assets can be exchanged.
+## Specification
-Analogous to the above example, it is possible to create multiple offers that individually meet the requirements but exceed the available limit when considered in aggregate. Suppose that an offer creates an offer which is IEIF with the buying amount equal to the available limit of the buying asset. The account then creates a second offer with a worse price but otherwise identical to the first. Although each offer individually satisfies the above requirements, the second offer is not IEIF. For suppose that the second offer was crossed by a hypothetical offer with no limits. This crossing offer would initially cross the first offer, which leaves no available limit for the buying asset. When the second offer is then crossed, no assets can be exchanged.
+This proposal will require XDR changes for `AccountEntry` and `TrustLineEntry`,
+as well as schema updates for the accounts and trustlines tables. The updated
+XDR is:
-## Specification
-This proposal will require XDR changes for `AccountEntry` and `TrustLineEntry`, as well as schema updates for the accounts and trustlines tables. The updated XDR is:
```c++
struct Liabilities
{
@@ -105,6 +143,7 @@ struct TrustLineEntry
```
The SQL required to update the schema is:
+
```sql
ALTER TABLE accounts ADD buyingliabilities BIGINT
CHECK (buyingliabilities >= 0);
@@ -117,6 +156,7 @@ ALTER TABLE trustlines ADD sellingliabilities BIGINT
```
The operation result code `ACCOUNT_MERGE_DEST_FULL` also must be added:
+
```c++
enum AccountMergeResultCode
{
@@ -134,133 +174,257 @@ enum AccountMergeResultCode
```
## Rationale
-The purpose of the asset-backed offers proposal is to maintain the invariant that every offer is IEIF. We begin with a discussion of what can cause offers to no longer be IEIF:
-
-* Fees
- * Account `A` is used to pay fees: offers owned by `A` and selling the native asset may no longer be IEIF
-* `AllowTrustOp`
- * Revoke authorization from account `A` to hold non-native asset `X`: all offers owned by `A` and either buying or selling `X` are no longer IEIF
-* `ChangeTrustOp`
- * Create trust line for account `A`: offers owned by `A` and selling the native asset may no longer be IEIF
- * Reduce the limit on a trust line for account `A` to hold a non-native asset `X`: offers owned by `A` and buying `X` may no longer be IEIF
-* `CreateAccountOp`
- * Create account using native assets from account `A`: offers owned by `A` and selling the native asset may no longer be IEIF
-* `InflationOp`
- * Account `W` is an inflation winner: offers owned by `W` and buying the native asset may no longer be IEIF
-* `ManageDataOp`
- * Create account data for account `A`: offers owned by `A` and selling the native asset may no longer be IEIF
-* `ManageOfferOp` (and `CreatePassiveOfferOp`)
- * Account `A` crosses offer owned by account `M` selling asset `Y` for asset `X`:
- * offers owned by `A` and selling `X` may no longer be IEIF
- * offers owned by `A` and buying `Y` may no longer be IEIF
- * offers owned by `M` and selling `Y` may no longer be IEIF
- * offers owned by `M` and buying `X` may no longer be IEIF
- * Create offer for account `A` selling asset `X` for asset `Y`:
- * offers owned by `A` and selling `X` may no longer be IEIF
- * offers owned by `A` and buying `Y` may no longer be IEIF
- * offers owned by `A` and selling the native asset may no longer be IEIF
-* `MergeOp`
- * Account `S` is merged into account `D`: offers owned by `D` and buying the native asset may no longer be IEIF
-* `PaymentOp`
- * Account `S` pays asset `X` to account `D`:
- * offers owned by `S` and selling `X` may no longer be IEIF
- * offers owned by `D` and buying `X` may no longer be IEIF
-* `PathPaymentOp`
- * Account `S` crosses offer owned by account `M` selling asset `Y` for asset `X`:
- * offers owned by `S` and selling `X` may no longer be IEIF
- * offers owned by `S` and buying `Y` may no longer be IEIF
- * offers owned by `M` and selling `Y` may no longer be IEIF
- * offers owned by `M` and buying `X` may no longer be IEIF
- * Account `S` pays asset `X` to account `D` arriving as asset `Y`:
- * offers owned by `S` and selling `X` may no longer be IEIF
- * offers owned by `D` and buying `Y` may no longer be IEIF
-* `SetOptionsOp`
- * Add signer to account `A`: offers owned by `A` and selling the native asset may no longer be IEIF
-
-From this analysis, it is clear that any proposal which attempts to modify or delete offers that are no longer IEIF would require maintaining the offer book after fee collection and after each operation. This would be both complicated and inefficient. Instead, we pursue a proposal which modifies the operations such that they guarantee offers remain IEIF. To achieve this, we first define new quantities which are derived data on the ledger:
-
-* `account.sellingLiabilities`
- * For account `A`: the amount of native asset offered to be sold, aggregated over all offers owned by `A`
-* `account.buyingLiabilities`
- * For account `A`: the amount of native asset offered to be bought, aggregated over all offers owned by `A`
-* `trustline.sellingLiabilities`
- * For account `A` and non-native asset `X`: the amount of `X` offered to be sold, aggregated over all offers owned by `A`
-* `trustline.buyingLiabilities`
- * For account `A` and non-native asset `X`: the amount of `X` offered to be bought, aggregated over all offers owned by `A`
-
-These quantities will be updated whenever an offer is created, modified, or deleted. When an offer is created, the liabilities for the offer will be calculated and added to `buyingLiabilities` and `sellingLiabilities` in the relevant account and/or trust lines. When an offer is deleted, the liabilities for the offer will be calculated and subtracted from `buyingLiabilities` and `sellingLiabilities` in the relevant account and/or trust lines. When an offer is modified, it can be viewed as a delete followed by a create so the `buyingLiabilities` and `sellingLiabilities` can be updated using the logic from those cases. As we already load accounts and trust lines when interacting with offers, the cost of maintaining these quantities should be minimal.
-
-We will now use these quantities to modify the operations such that they guarantee offers remain IEIF. In what follows, available limit is `INT64_MAX` for the native asset and `limit - buyingLiabilities` for non-native assets. Similarly, available balance is `balance - reserve - sellingLiabilities` for the native asset and `balance - sellingLiabilities` for non-native assets. In order for issuers to be able to buy or sell any quantity (even exceeding `INT64_MAX`) of an asset they issued, available limit and available balance will always be `INT64_MAX` in this case. The asset-backed offers proposal modifies the operations such that all offers remain IEIF after an operation:
-
-* Fees
- * Account `A` is used to pay fees: transaction is not valid with result `txINSUFFICIENT_BALANCE` if new available balance of native asset is negative
-* `AllowTrustOp`
- * Revoke authorization from account `A` to hold non-native asset `X`: all offers owned by `A` and either buying or selling `X` are deleted
-* `ChangeTrustOp`
- * Create trust line for account `A`: fails with result `CHANGE_TRUST_LOW_RESERVE` if new available balance of native asset is negative
- * Reduce the limit on a trust line for account `A` to hold a non-native asset `X`: fails with result `CHANGE_TRUST_INVALID_LIMIT` if new available limit is negative
-* `CreateAccountOp`
- * Create account using native assets from account `A`: fails with `CREATE_ACCOUNT_UNDERFUNDED` if new available balance of native asset is negative
-* `InflationOp`
- * Account `W` is an inflation winner: inflation winners receive the minimum of their winning and their available limit of native asset, with the residual returned to the inflation pool
-* `ManageDataOp`
- * Create account data for account `A`: fails with result `MANAGE_DATA_LOW_RESERVE` if new available balance of native asset is negative
-* `ManageOfferOp` (and `CreatePassiveOfferOp`)
- * Account `A` crosses offer owned by account `M` selling asset `Y` for asset `X`:
- * `A` does not buy more `Y` than available limit
- * `A` does not sell more `X` than available balance
- * `M` does not buy more `X` than available limit
- * `M` does not sell more `Y` than available balance
- * Create offer for account `A` selling asset `X` for asset `Y`:
- * `A` does not offer to sell more `X` than available balance
- * `A` does not offer to buy more `Y` than available limit
- * fails with result `MANAGE_OFFER_LOW_RESERVE` if new available balance of native asset is negative
-* `MergeOp`
- * Account `S` is merged into account `D`: fails with result `ACCOUNT_MERGE_DEST_FULL` if new available limit of native asset is negative
-* `PaymentOp`
- * Account `S` pays asset `X` to account `D`:
- * fails with result `PAYMENT_UNDERFUNDED` if new available balance of `X` in `S` is negative
- * fails with result `PAYMENT_LINE_FULL` if new available limit of `X` in `D` is negative
-* `PathPaymentOp`
- * Account `S` pays asset `X` to account `D` arriving as asset `Y`:
- * fails with result `PATH_PAYMENT_UNDERFUNDED` if new available balance of `X` in `S` is negative
- * fails with result `PATH_PAYMENT_LINE_FULL` if new available limit of `Y` in `D` is negative
- * Account `S` crosses offer owned by account `M` selling asset `Y` for asset `X`:
- * `S` does not buy more `Y` than available limit
- * `S` does not sell more `X` than available balance
- * `M` does not buy more `X` than available limit
- * `M` does not sell more `Y` than available balance
-* `SetOptionsOp`
- * Add signer to account `A`: fails with result `SET_OPTIONS_LOW_RESERVE` if new available balance of native asset is negative
-
-The behavior of `ManageOfferOp` (and `CreatePassiveOfferOp`) will undergo a considerable change in order to make the behavior predictable and performant with regard to liabilities. Before crossing any offers, both operations will now enforce the following requirements:
-
-* If modifying the offer `offerID`, then the liabilities associated with offer `offerID` are removed
-* If a new offer is being created, the number of subentries is updated
-* If the buying liabilities associated with the new offer exceed the available limit then the operation fails with `MANAGE_OFFER_LINE_FULL`
-* If the selling liabilities associated with the new offer exceed the available balance then the operation fails with `MANAGE_OFFER_UNDERFUNDED`
-
-These updates to `ManageOfferOp` (and `CreatePassiveOfferOp`) imply all of the modifications discussed in the previous list for these operations.
+
+The purpose of the asset-backed offers proposal is to maintain the invariant
+that every offer is IEIF. We begin with a discussion of what can cause offers
+to no longer be IEIF:
+
+- Fees
+ - Account `A` is used to pay fees: offers owned by `A` and selling the native
+ asset may no longer be IEIF
+- `AllowTrustOp`
+ - Revoke authorization from account `A` to hold non-native asset `X`: all
+ offers owned by `A` and either buying or selling `X` are no longer IEIF
+- `ChangeTrustOp`
+ - Create trust line for account `A`: offers owned by `A` and selling the
+ native asset may no longer be IEIF
+ - Reduce the limit on a trust line for account `A` to hold a non-native asset
+ `X`: offers owned by `A` and buying `X` may no longer be IEIF
+- `CreateAccountOp`
+ - Create account using native assets from account `A`: offers owned by `A`
+ and selling the native asset may no longer be IEIF
+- `InflationOp`
+ - Account `W` is an inflation winner: offers owned by `W` and buying the
+ native asset may no longer be IEIF
+- `ManageDataOp`
+ - Create account data for account `A`: offers owned by `A` and selling the
+ native asset may no longer be IEIF
+- `ManageOfferOp` (and `CreatePassiveOfferOp`)
+ - Account `A` crosses offer owned by account `M` selling asset `Y` for asset
+ `X`:
+ - offers owned by `A` and selling `X` may no longer be IEIF
+ - offers owned by `A` and buying `Y` may no longer be IEIF
+ - offers owned by `M` and selling `Y` may no longer be IEIF
+ - offers owned by `M` and buying `X` may no longer be IEIF
+ - Create offer for account `A` selling asset `X` for asset `Y`:
+ - offers owned by `A` and selling `X` may no longer be IEIF
+ - offers owned by `A` and buying `Y` may no longer be IEIF
+ - offers owned by `A` and selling the native asset may no longer be IEIF
+- `MergeOp`
+ - Account `S` is merged into account `D`: offers owned by `D` and buying the
+ native asset may no longer be IEIF
+- `PaymentOp`
+ - Account `S` pays asset `X` to account `D`:
+ - offers owned by `S` and selling `X` may no longer be IEIF
+ - offers owned by `D` and buying `X` may no longer be IEIF
+- `PathPaymentOp`
+ - Account `S` crosses offer owned by account `M` selling asset `Y` for asset
+ `X`:
+ - offers owned by `S` and selling `X` may no longer be IEIF
+ - offers owned by `S` and buying `Y` may no longer be IEIF
+ - offers owned by `M` and selling `Y` may no longer be IEIF
+ - offers owned by `M` and buying `X` may no longer be IEIF
+ - Account `S` pays asset `X` to account `D` arriving as asset `Y`:
+ - offers owned by `S` and selling `X` may no longer be IEIF
+ - offers owned by `D` and buying `Y` may no longer be IEIF
+- `SetOptionsOp`
+ - Add signer to account `A`: offers owned by `A` and selling the native asset
+ may no longer be IEIF
+
+From this analysis, it is clear that any proposal which attempts to modify or
+delete offers that are no longer IEIF would require maintaining the offer book
+after fee collection and after each operation. This would be both complicated
+and inefficient. Instead, we pursue a proposal which modifies the operations
+such that they guarantee offers remain IEIF. To achieve this, we first define
+new quantities which are derived data on the ledger:
+
+- `account.sellingLiabilities`
+ - For account `A`: the amount of native asset offered to be sold, aggregated
+ over all offers owned by `A`
+- `account.buyingLiabilities`
+ - For account `A`: the amount of native asset offered to be bought,
+ aggregated over all offers owned by `A`
+- `trustline.sellingLiabilities`
+ - For account `A` and non-native asset `X`: the amount of `X` offered to be
+ sold, aggregated over all offers owned by `A`
+- `trustline.buyingLiabilities`
+ - For account `A` and non-native asset `X`: the amount of `X` offered to be
+ bought, aggregated over all offers owned by `A`
+
+These quantities will be updated whenever an offer is created, modified, or
+deleted. When an offer is created, the liabilities for the offer will be
+calculated and added to `buyingLiabilities` and `sellingLiabilities` in the
+relevant account and/or trust lines. When an offer is deleted, the liabilities
+for the offer will be calculated and subtracted from `buyingLiabilities` and
+`sellingLiabilities` in the relevant account and/or trust lines. When an offer
+is modified, it can be viewed as a delete followed by a create so the
+`buyingLiabilities` and `sellingLiabilities` can be updated using the logic
+from those cases. As we already load accounts and trust lines when interacting
+with offers, the cost of maintaining these quantities should be minimal.
+
+We will now use these quantities to modify the operations such that they
+guarantee offers remain IEIF. In what follows, available limit is `INT64_MAX`
+for the native asset and `limit - buyingLiabilities` for non-native assets.
+Similarly, available balance is `balance - reserve - sellingLiabilities` for
+the native asset and `balance - sellingLiabilities` for non-native assets. In
+order for issuers to be able to buy or sell any quantity (even exceeding
+`INT64_MAX`) of an asset they issued, available limit and available balance
+will always be `INT64_MAX` in this case. The asset-backed offers proposal
+modifies the operations such that all offers remain IEIF after an operation:
+
+- Fees
+ - Account `A` is used to pay fees: transaction is not valid with result
+ `txINSUFFICIENT_BALANCE` if new available balance of native asset is
+ negative
+- `AllowTrustOp`
+ - Revoke authorization from account `A` to hold non-native asset `X`: all
+ offers owned by `A` and either buying or selling `X` are deleted
+- `ChangeTrustOp`
+ - Create trust line for account `A`: fails with result
+ `CHANGE_TRUST_LOW_RESERVE` if new available balance of native asset is
+ negative
+ - Reduce the limit on a trust line for account `A` to hold a non-native asset
+ `X`: fails with result `CHANGE_TRUST_INVALID_LIMIT` if new available limit
+ is negative
+- `CreateAccountOp`
+ - Create account using native assets from account `A`: fails with
+ `CREATE_ACCOUNT_UNDERFUNDED` if new available balance of native asset is
+ negative
+- `InflationOp`
+ - Account `W` is an inflation winner: inflation winners receive the minimum
+    of their winnings and their available limit of native asset, with the
+ residual returned to the inflation pool
+- `ManageDataOp`
+ - Create account data for account `A`: fails with result
+ `MANAGE_DATA_LOW_RESERVE` if new available balance of native asset is
+ negative
+- `ManageOfferOp` (and `CreatePassiveOfferOp`)
+ - Account `A` crosses offer owned by account `M` selling asset `Y` for asset
+ `X`:
+ - `A` does not buy more `Y` than available limit
+ - `A` does not sell more `X` than available balance
+ - `M` does not buy more `X` than available limit
+ - `M` does not sell more `Y` than available balance
+ - Create offer for account `A` selling asset `X` for asset `Y`:
+ - `A` does not offer to sell more `X` than available balance
+ - `A` does not offer to buy more `Y` than available limit
+ - fails with result `MANAGE_OFFER_LOW_RESERVE` if new available balance of
+ native asset is negative
+- `MergeOp`
+ - Account `S` is merged into account `D`: fails with result
+ `ACCOUNT_MERGE_DEST_FULL` if new available limit of native asset is
+ negative
+- `PaymentOp`
+ - Account `S` pays asset `X` to account `D`:
+ - fails with result `PAYMENT_UNDERFUNDED` if new available balance of `X`
+ in `S` is negative
+ - fails with result `PAYMENT_LINE_FULL` if new available limit of `X` in
+ `D` is negative
+- `PathPaymentOp`
+ - Account `S` pays asset `X` to account `D` arriving as asset `Y`:
+ - fails with result `PATH_PAYMENT_UNDERFUNDED` if new available balance of
+ `X` in `S` is negative
+ - fails with result `PATH_PAYMENT_LINE_FULL` if new available limit of `Y`
+ in `D` is negative
+ - Account `S` crosses offer owned by account `M` selling asset `Y` for asset
+ `X`:
+ - `S` does not buy more `Y` than available limit
+ - `S` does not sell more `X` than available balance
+ - `M` does not buy more `X` than available limit
+ - `M` does not sell more `Y` than available balance
+- `SetOptionsOp`
+ - Add signer to account `A`: fails with result `SET_OPTIONS_LOW_RESERVE` if
+ new available balance of native asset is negative
+
+The behavior of `ManageOfferOp` (and `CreatePassiveOfferOp`) will undergo a
+considerable change in order to make the behavior predictable and performant
+with regard to liabilities. Before crossing any offers, both operations will
+now enforce the following requirements:
+
+- If modifying the offer `offerID`, then the liabilities associated with offer
+ `offerID` are removed
+- If a new offer is being created, the number of subentries is updated
+- If the buying liabilities associated with the new offer exceed the available
+ limit then the operation fails with `MANAGE_OFFER_LINE_FULL`
+- If the selling liabilities associated with the new offer exceed the available
+ balance then the operation fails with `MANAGE_OFFER_UNDERFUNDED`
+
+These updates to `ManageOfferOp` (and `CreatePassiveOfferOp`) imply all of the
+modifications discussed in the previous list for these operations.
## Backwards Compatibility
-We will denote the protocol version which enables this proposal as `PROPOSAL_VERSION`. The database schema can be updated to include `buyingLiabilities` and `sellingLiabilities`, where these values are set to `NULL` until the protocol version is at least `PROPOSAL_VERSION`.
+
+We will denote the protocol version which enables this proposal as
+`PROPOSAL_VERSION`. The database schema can be updated to include
+`buyingLiabilities` and `sellingLiabilities`, where these values are set to
+`NULL` until the protocol version is at least `PROPOSAL_VERSION`.
### Upgrading the Protocol Version
-When the protocol version is upgraded to `PROPOSAL_VERSION`, the values of `buyingLiabilities` and `sellingLiabilities` will need to be calculated for all accounts that own offers. For other accounts these values can either be left as `NULL` and updated lazily, or they can be updated globally to 0. It would be better to update the values lazily as this would require a much smaller bucket than updating globally to 0.
-It is possible, after the protocol version is upgraded to `PROPOSAL_VERSION`, that there are existing offers which are not IEIF. We propose to resolve this issue by deleting offers owned by accounts with excess liabilities. Specifically, for any account `A` and assets `X` and `Y`, this approach would delete any offer owned by `A` and selling `X` in exchange for `Y` if `A` has excess selling liabilities of `X` or excess buying liabilities of `Y`. As of ledger 18178688, there are a maximum of 6053 offers (out of 12734 total offers) that would be deleted, owned by a maximum of 983 accounts (out of 474454 total accounts). One disadvantage to this approach is that it would likely cause a considerable decrease in available liquidity for some time while new offers are created, although this impact would be smaller than in the alternative approach discussed below. A further disadvantage to this approach is that, for some accounts, some offers may be deleted while others remain which could be undesirable in some cases. Both of these disadvantages could potentially be mitigated by giving an advance notice of a few weeks to the community and developers so that they have time to update their offers such that they do not have excess liabilities. As in the alternative approach, it could occur that it is impossible to recreate some offers. But at least offers selling an asset issued by that account are less likely to be deleted, since there is no limit on liabilities for the issuer of an asset. Regarding the specific offers mentioned in the discussion of the alternative approach, none of the 3 that are selling an asset issued by that account would be deleted.
+When the protocol version is upgraded to `PROPOSAL_VERSION`, the values of
+`buyingLiabilities` and `sellingLiabilities` will need to be calculated for all
+accounts that own offers. For other accounts these values can either be left as
+`NULL` and updated lazily, or they can be updated globally to 0. It would be
+better to update the values lazily as this would require a much smaller bucket
+than updating globally to 0.
+
+It is possible, after the protocol version is upgraded to `PROPOSAL_VERSION`,
+that there are existing offers which are not IEIF. We propose to resolve this
+issue by deleting offers owned by accounts with excess liabilities.
+Specifically, for any account `A` and assets `X` and `Y`, this approach would
+delete any offer owned by `A` and selling `X` in exchange for `Y` if `A` has
+excess selling liabilities of `X` or excess buying liabilities of `Y`. As of
+ledger 18178688, there are a maximum of 6053 offers (out of 12734 total offers)
+that would be deleted, owned by a maximum of 983 accounts (out of 474454 total
+accounts). One disadvantage to this approach is that it would likely cause a
+considerable decrease in available liquidity for some time while new offers are
+created, although this impact would be smaller than in the alternative approach
+discussed below. A further disadvantage to this approach is that, for some
+accounts, some offers may be deleted while others remain which could be
+undesirable in some cases. Both of these disadvantages could potentially be
+mitigated by giving an advance notice of a few weeks to the community and
+developers so that they have time to update their offers such that they do not
+have excess liabilities. As in the alternative approach, it could occur that it
+is impossible to recreate some offers. But at least offers selling an asset
+issued by that account are less likely to be deleted, since there is no limit
+on liabilities for the issuer of an asset. Regarding the specific offers
+mentioned in the discussion of the alternative approach, none of the 3 that are
+selling an asset issued by that account would be deleted.
#### Alternative Approach: Delete all existing offers
-As the description suggests, this approach would delete all existing offers when the protocol is upgraded to `PROPOSAL_VERSION`. As of ledger 18178688, there are 12734 offers owned by 2653 accounts (out of 474454 total accounts). One disadvantage to this approach is that it would likely cause a considerable decrease in available liquidity for some time while new offers are created. Some offers which have been created belong to accounts that would not be able to recreate them. There are 5 offers owned by 4 accounts with no signers and a master key weight of 0, with 2 of these offers selling an asset issued by the account. There is 1 other offer owned by an account whose total weight of signers and master key does not exceed the medium threshold, and it is also selling an asset issued by the account. It is worth repeating that this is only a simple lower bound on the number of offers that could not be recreated.
+
+As the description suggests, this approach would delete all existing offers
+when the protocol is upgraded to `PROPOSAL_VERSION`. As of ledger 18178688,
+there are 12734 offers owned by 2653 accounts (out of 474454 total accounts).
+One disadvantage to this approach is that it would likely cause a considerable
+decrease in available liquidity for some time while new offers are created.
+Some offers which have been created belong to accounts that would not be able
+to recreate them. There are 5 offers owned by 4 accounts with no signers and a
+master key weight of 0, with 2 of these offers selling an asset issued by the
+account. There is 1 other offer owned by an account whose total weight of
+signers and master key does not exceed the medium threshold, and it is also
+selling an asset issued by the account. It is worth repeating that this is only
+a simple lower bound on the number of offers that could not be recreated.
### Base Reserve
-The previous section details only a single point of backward incompatibility, but the process of increasing the base reserve presents a similar issue of backward incompatibility which would be repeatedly possible in the future. The reason for this is that increasing the base reserve could cause offers selling the native asset to no longer be IEIF. The potential solutions presented in the previous section would also apply here, but the same disadvantages would still apply as well.
+
+The previous section details only a single point of backward incompatibility,
+but the process of increasing the base reserve presents a similar issue of
+backward incompatibility which would be repeatedly possible in the future. The
+reason for this is that increasing the base reserve could cause offers selling
+the native asset to no longer be IEIF. The potential solutions presented in the
+previous section would also apply here, but the same disadvantages would still
+apply as well.
## Test Cases
-* `buyingLiabilities` and `sellingLiabilities` are updated when offers are modified by `PathPaymentOp`, `ManageOfferOp`, `CreatePassiveOfferOp`, and `AllowTrustOp`
-* Each operation should have the new behavior described above
+
+- `buyingLiabilities` and `sellingLiabilities` are updated when offers are
+ modified by `PathPaymentOp`, `ManageOfferOp`, `CreatePassiveOfferOp`, and
+ `AllowTrustOp`
+- Each operation should have the new behavior described above
## Implementation
+
https://github.com/stellar/stellar-core/pull/1718
Corresponding commit is git: 4dab9625d42b252d3f11500151ffcc66a1cd5ad2
diff --git a/core/cap-0004.md b/core/cap-0004.md
index 85452c89c..dc9452943 100644
--- a/core/cap-0004.md
+++ b/core/cap-0004.md
@@ -11,68 +11,174 @@ Protocol version: 10
```
## Simple Summary
-As of protocol version 9, crossing offers can lead to unbounded relative rounding error and can even leave the book in a crossed state. We propose a new algorithm to resolve these issues.
-## Abstract
-As of protocol version 9, crossing offers can lead to unbounded relative rounding error and can even leave the book in a crossed state. We propose a new algorithm to resolve these issues. This algorithm will prevent unbounded relative rounding error by adding an error threshold, and will prevent the book becoming crossed by guaranteeing that the less valuable offer must always be removed from the book. The new algorithm also guarantees that any given offer can only suffer a single adverse rounding event by rounding in favor of the offer with more total value (which, equivalently, means the offer which remains in the book).
-
-## Motivation
-In what follows, quantities expressed like "100 stroop X" mean 100 times the minimum representable quantity of the asset X. Consider the following situation:
-
-- In transaction 1, account A creates an offer selling 100 stroop X at a price of 3 Y / 2 X.
-- In transaction 2, account B creates an offer selling 10 stroop Y at a price of 1 X / 2 Y.
+As of protocol version 9, crossing offers can lead to unbounded relative
+rounding error and can even leave the book in a crossed state. We propose a new
+algorithm to resolve these issues.
-Offer B executes against offer A, but it is impossible to execute the entire offer at the price of 3 Y / 2 X. The protocol uses a rounding algorithm to determine how this situation, and other situations like it, should be handled. As of protocol version 9, 10 stroop Y is exchanged for 6 stroop X meaning that the trade actually executes at a price of 5 Y / 3 X. The relative error in the price is over 11% when viewed as a price in Y / X, and is 10% when viewed as a price in X / Y. This situation is bad, but it is not nearly as bad as it can be: the rounding algorithm as of protocol version 9 actually has unbounded relative error. Consider, for illustration, the following worse situation:
-
-- In transaction 1, account A creates an offer selling 1 stroop X at a price of 1 Y / 50,000 X.
-- In transaction 2, account B creates an offer selling 100 Y at a price of 1 X / 1 Y.
+## Abstract
-As of protocol version 9, 1 stroop Y is exchanged for 1 stroop X meaning that the trade actually executes at a price of 1 Y / 1 X. The relative error in the price is 4999900% when viewed as a price in Y / X, and is 99.998% when viewed as a price in X / Y.
+As of protocol version 9, crossing offers can lead to unbounded relative
+rounding error and can even leave the book in a crossed state. We propose a new
+algorithm to resolve these issues. This algorithm will prevent unbounded
+relative rounding error by adding an error threshold, and will prevent the book
+becoming crossed by guaranteeing that the less valuable offer must always be
+removed from the book. The new algorithm also guarantees that any given offer
+can only suffer a single adverse rounding event by rounding in favor of the
+offer with more total value (which, equivalently, means the offer which remains
+in the book).
-There is still another issue with the rounding algorithm as of protocol version 9. Whenever offers cross, at least one of the offers must execute entirely in order to avoid a crossed book. There are situations, however, where the rounding algorithm as of protocol version 9 will allow the book to remain crossed. Consider the following situation:
+## Motivation
-- In transaction 1, account A creates an offer selling 100 X at a price of 10 Y / 1 X.
-- In transaction 2, account B creates an offer selling 1 stroop Y at a price of 1 X / 10 Y.
+In what follows, quantities expressed like "100 stroop X" mean 100 times the
+minimum representable quantity of the asset X. Consider the following
+situation:
+
+- In transaction 1, account A creates an offer selling 100 stroop X at a price
+ of 3 Y / 2 X.
+- In transaction 2, account B creates an offer selling 10 stroop Y at a price
+ of 1 X / 2 Y.
+
+Offer B executes against offer A, but it is impossible to execute the entire
+offer at the price of 3 Y / 2 X. The protocol uses a rounding algorithm to
+determine how this situation, and other situations like it, should be handled.
+As of protocol version 9, 10 stroop Y is exchanged for 6 stroop X meaning that
+the trade actually executes at a price of 5 Y / 3 X. The relative error in the
+price is over 11% when viewed as a price in Y / X, and is 10% when viewed as a
+price in X / Y. This situation is bad, but it is not nearly as bad as it can
+be: the rounding algorithm as of protocol version 9 actually has unbounded
+relative error. Consider, for illustration, the following worse situation:
+
+- In transaction 1, account A creates an offer selling 1 stroop X at a price of
+ 1 Y / 50,000 X.
+- In transaction 2, account B creates an offer selling 100 Y at a price of 1 X
+ / 1 Y.
+
+As of protocol version 9, 1 stroop Y is exchanged for 1 stroop X meaning that
+the trade actually executes at a price of 1 Y / 1 X. The relative error in the
+price is 4999900% when viewed as a price in Y / X, and is 99.998% when viewed
+as a price in X / Y.
+
+There is still another issue with the rounding algorithm as of protocol
+version 9. Whenever offers cross, at least one of the offers must execute
+entirely in order to avoid a crossed book. There are situations, however, where
+the rounding algorithm as of protocol version 9 will allow the book to remain
+crossed. Consider the following situation:
+
+- In transaction 1, account A creates an offer selling 100 X at a price of 10 Y
+ / 1 X.
+- In transaction 2, account B creates an offer selling 1 stroop Y at a price of
+ 1 X / 10 Y.
As of protocol version 9, nothing is exchanged and the book remains crossed.
## Specification
-We propose a new rounding algorithm that resolves the above issues. The algorithm proceeds through three phases. First, the total value associated with each offer must be computed. Second, rounding is conducted in such a manner that the price never becomes less favorable for the offer with more total value. Third, the offer which initially had less total value is removed from the book regardless of whether it has been executed entirely.
-In order to compute the total value associated with an offer, we must choose what this value will be denominated in. This choice is arbitrary, and we elected to use the selling asset of the offer which was already in the book. We define the total value associated with an offer to be the value of the goods on offer in terms of the selling asset of the offer, rescaled by the denominator of the price of the offer which was already in the book.
-
-The manner in which the rounding is conducted is the central aspect of this proposal. The rounding algorithm guarantees that the price never becomes less favorable for the offer with more total value. Since the offer with less total value is always removed from the book, this guarantees that a single offer never suffers adverse rounding more than once. The rounding algorithm is also price aware and attempts to minimize errors by rounding in the less valuable asset. In order to prevent unbounded relative error during rounding, we introduce a new error threshold. If the relative rounding error exceeds the error threshold, nothing is exchanged.
-
-The offer which initially had less total value is removed from the book regardless of whether it has been executed entirely, so it is guaranteed that the book never remains crossed. When the book is no longer crossed, whichever offer ultimately remained in the book is adjusted such that it can be executed entirely. This guarantees that, regardless of rounding and error thresholding, it is always possible to execute against an offer in the book by submitting an offer with more total value. It is possible that an offer cannot be adjusted to any offer that can be executed entirely, in which case that offer is removed from the book.
-
-There is an additional complication with regard to the error threshold in path payment. For path payment, the error threshold is applied asymmetrically in the sense that the price will never become more than 1% less favorable but could become arbitrarily more favorable for the offer which was already in the book. This is sensible as path payment has a max send parameter which determines the worst overall price that should be accepted.
-
-The technical details of implementing the above are subtle and difficult to discuss in the absence of working code. Please refer to the description and implementation of [exchangeV10](https://github.com/stellar/stellar-core/blob/c3c8fd2c95eae9daa8aab324d9ef3e07047aacc9/src/transactions/OfferExchange.cpp#L173).
+We propose a new rounding algorithm that resolves the above issues. The
+algorithm proceeds through three phases. First, the total value associated with
+each offer must be computed. Second, rounding is conducted in such a manner
+that the price never becomes less favorable for the offer with more total
+value. Third, the offer which initially had less total value is removed from
+the book regardless of whether it has been executed entirely.
+
+In order to compute the total value associated with an offer, we must choose
+what this value will be denominated in. This choice is arbitrary, and we
+elected to use the selling asset of the offer which was already in the book. We
+define the total value associated with an offer to be the value of the goods on
+offer in terms of the selling asset of the offer, rescaled by the denominator
+of the price of the offer which was already in the book.
+
+The manner in which the rounding is conducted is the central aspect of this
+proposal. The rounding algorithm guarantees that the price never becomes less
+favorable for the offer with more total value. Since the offer with less total
+value is always removed from the book, this guarantees that a single offer
+never suffers adverse rounding more than once. The rounding algorithm is also
+price aware and attempts to minimize errors by rounding in the less valuable
+asset. In order to prevent unbounded relative error during rounding, we
+introduce a new error threshold. If the relative rounding error exceeds the
+error threshold, nothing is exchanged.
+
+The offer which initially had less total value is removed from the book
+regardless of whether it has been executed entirely, so it is guaranteed that
+the book never remains crossed. When the book is no longer crossed, whichever
+offer ultimately remained in the book is adjusted such that it can be executed
+entirely. This guarantees that, regardless of rounding and error thresholding,
+it is always possible to execute against an offer in the book by submitting an
+offer with more total value. It is possible that an offer cannot be adjusted to
+any offer that can be executed entirely, in which case that offer is removed
+from the book.
+
+There is an additional complication with regard to the error threshold in path
+payment. For path payment, the error threshold is applied asymmetrically in the
+sense that the price will never become more than 1% less favorable but could
+become arbitrarily more favorable for the offer which was already in the book.
+This is sensible as path payment has a max send parameter which determines the
+worst overall price that should be accepted.
+
+The technical details of implementing the above are subtle and difficult to
+discuss in the absence of working code. Please refer to the description and
+implementation of
+[exchangeV10](https://github.com/stellar/stellar-core/blob/c3c8fd2c95eae9daa8aab324d9ef3e07047aacc9/src/transactions/OfferExchange.cpp#L173).
## Rationale
+
A variety of other proposals were considered, among them:
### Randomized Rounding
-This algorithm had three phases. First, it would determine which offer should remain in the book (for example using the same algorithm as described in Specification). Second, the error induced by rounding up and rounding down would be calculated. Third, the rounding would be performed stochastically such that the expected rounding error was 0. There were several disadvantages to this proposal:
-- Requires a source of randomness in the ledger that could not be exploited by validators
-- The limit price on any offer could be violated in both directions with unbounded relative error
+This algorithm had three phases. First, it would determine which offer should
+remain in the book (for example using the same algorithm as described in
+Specification). Second, the error induced by rounding up and rounding down
+would be calculated. Third, the rounding would be performed stochastically such
+that the expected rounding error was 0. There were several disadvantages to
+this proposal:
+
+- Requires a source of randomness in the ledger that could not be exploited by
+ validators
+- The limit price on any offer could be violated in both directions with
+ unbounded relative error
- Rounding errors average out over time but individuals could still be effected
### Lot Sizes
-This proposal focused on restructuring the offer book rather than improving the rounding algorithm. The idea was to split each market into many separate market segments distinguished by the lot size and the base asset (the asset used to express the lot size). The price of an offer would then be the quantity of the non-base asset that you are willing to exchange for lot size of the base asset. This forced the price to be an integer and guaranteed that crossing offers would always execute without any rounding. The advantages of this solution are that rounding never occurs and that each individual market segment is never crossed. There were several disadvantages to this proposal:
-- Trading is much more complicated since determining the best price requires looking at every market segment
-- Path payment is much more complicated since it needs to process not just every asset but every market segment of every asset
+This proposal focused on restructuring the offer book rather than improving the
+rounding algorithm. The idea was to split each market into many separate market
+segments distinguished by the lot size and the base asset (the asset used to
+express the lot size). The price of an offer would then be the quantity of the
+non-base asset that you are willing to exchange for lot size of the base asset.
+This forced the price to be an integer and guaranteed that crossing offers
+would always execute without any rounding. The advantages of this solution are
+that rounding never occurs and that each individual market segment is never
+crossed. There were several disadvantages to this proposal:
+
+- Trading is much more complicated since determining the best price requires
+ looking at every market segment
+- Path payment is much more complicated since it needs to process not just
+ every asset but every market segment of every asset
- Significant API changes required
## Backwards Compatibility
-This proposal will change the rounding behavior of offers. If any pre-signed transactions depended on the precise outcome of cross offer either through `ManageOffer` or `PathPayment`, it is possible that such a transaction will now fail.
-As of protocol version 10, every offer in the book must be immediately executable in full. For more information on this topic, refer to [CAP-0003](https://github.com/stellar/stellar-protocol/blob/335b11953904c9229327b95266201f405ae6c612/core/cap-0003.md). That proposal notes that offers which are not IEIF will be removed when upgrading to protocol version 10. As part of this process, offers will also be adjusted such that they can be executed entirely as described above. Analogous to what is mentioned in the Specification, offers which cannot be adjusted such that they can be executed entirely will be removed from the book during the upgrade.
+This proposal will change the rounding behavior of offers. If any pre-signed
+transactions depended on the precise outcome of crossing offers either through
+`ManageOffer` or `PathPayment`, it is possible that such a transaction will now
+fail.
+
+As of protocol version 10, every offer in the book must be immediately
+executable in full. For more information on this topic, refer to
+[CAP-0003](https://github.com/stellar/stellar-protocol/blob/335b11953904c9229327b95266201f405ae6c612/core/cap-0003.md).
+That proposal notes that offers which are not IEIF will be removed when
+upgrading to protocol version 10. As part of this process, offers will also be
+adjusted such that they can be executed entirely as described above. Analogous
+to what is mentioned in the Specification, offers which cannot be adjusted such
+that they can be executed entirely will be removed from the book during the
+upgrade.
## Test Cases
+
Test cases for the new functionality were included in the implementation.
## Implementation
-Merged as of [stellar-core commit c3c8fd2c95eae9daa8aab324d9ef3e07047aacc9](https://github.com/stellar/stellar-core/commit/c3c8fd2c95eae9daa8aab324d9ef3e07047aacc9).
+
+Merged as of
+[stellar-core commit c3c8fd2c95eae9daa8aab324d9ef3e07047aacc9](https://github.com/stellar/stellar-core/commit/c3c8fd2c95eae9daa8aab324d9ef3e07047aacc9).
diff --git a/core/cap-0005.md b/core/cap-0005.md
index d91f92d04..ed2e1863b 100644
--- a/core/cap-0005.md
+++ b/core/cap-0005.md
@@ -2,7 +2,7 @@
```
CAP: 0005
-Title: Throttling and transaction pricing improvements
+Title: Throttling and transaction pricing improvements
Author: Nicolas Barry
Status: Final
Created: 2018-Oct-05
@@ -12,59 +12,64 @@ Protocol version: 11
```
## Simple Summary
+
Combined set of changes that rationalize how we throttle the network, and also
-makes it easier for clients to craft transactions that will make it into a ledger
-even when network fees are changing rapidly.
+makes it easier for clients to craft transactions that will make it into a
+ledger even when network fees are changing rapidly.
## Abstract
-
-Goal of the updated design is to maximize the number of operations included
-in a _candidate_ transaction set, while preserving fairness.
+
+Goal of the updated design is to maximize the number of operations included in
+a _candidate_ transaction set, while preserving fairness.
Note that there are two mechanism at play here: one that is implemented to
-construct _candidate_ values by validators which happens outside of consensus, and
-the other mechanism that is used to pick between two candidate sets during
+construct _candidate_ values by validators which happens outside of consensus,
+and the other mechanism that is used to pick between two candidate sets during
consensus.
Fairness in how transactions are included in a candidate set is equivalent to
-designing a sorting scheme for the list of transactions _before_
-trimming (typically all accumulated transactions on the validator) such that:
-* accounts have the same odds of having their transactions included, regardless
-of the distribution of transactions per account and regardless of how many
-transactions are picked from the sorted set.
-* transactions with a higher per operation fee have a strict advantage over lower
-fee transactions.
+designing a sorting scheme for the list of transactions _before_ trimming
+(typically all accumulated transactions on the validator) such that:
-Fairness is also achieved by attempting to _reduce_ the fee charged to accounts.
+- accounts have the same odds of having their transactions included, regardless
+ of the distribution of transactions per account and regardless of how many
+ transactions are picked from the sorted set.
+- transactions with a higher per operation fee have a strict advantage over
+ lower fee transactions.
+
+Fairness is also achieved by attempting to _reduce_ the fee charged to
+accounts.
This is achieved by associating a value `baseFee` to each transaction set
-(greater than `ledgerHeader.baseFee`), and using that base fee to compute the actual fee charged.
-With this change, `tx.fee` becomes an upper bound instead of being the actual
-fee deducted from the source account.
+(greater than `ledgerHeader.baseFee`), and using that base fee to compute the
+actual fee charged. With this change, `tx.fee` becomes an upper bound instead
+of being the actual fee deducted from the source account.
## Motivation
Right now the network configuration that governs the maximum throughput of the
network is `ledgerHeader.maxTxSetSize`, that controls the maximum number of
- _transactions_ that can be included in a transaction set; yet a good approximation
-of work (reflected in how fees are computed), is the total number of _operations_
-included in a transaction set.
-As any transaction can contain up to 100 operations, this causes the network
-setting to be overly conservative (as it has to assume the worst case situation
-where all transactions would be 100 operations).
-
-Another motivation for changing how fees are computed is that right now the
-fee charged for a given transaction is equal to the amount specified when the
+_transactions_ that can be included in a transaction set; yet a good
+approximation of work (reflected in how fees are computed), is the total number
+of _operations_ included in a transaction set. As any transaction can contain
+up to 100 operations, this causes the network setting to be overly conservative
+(as it has to assume the worst case situation where all transactions would be
+100 operations).
+
+Another motivation for changing how fees are computed is that right now the fee
+charged for a given transaction is equal to the amount specified when the
transaction was crafted.
When surge pricing occurs, or simply if `ledgerHeader.baseFee` is raised, this
creates problems:
-* for the typical user, this leads to transactions rejected by validators if the
-specified fee is too small.
-* for pre-signed transactions (smart contracts), the fee has to be specified with
-a value that exceeds the future fee required to be included in a transaction set.
-* in both cases the model can lead to excessive fees charged to users if the
-specified fee is bigger than needed.
+
+- for the typical user, this leads to transactions rejected by validators if
+ the specified fee is too small.
+- for pre-signed transactions (smart contracts), the fee has to be specified
+ with a value that exceeds the future fee required to be included in a
+ transaction set.
+- in both cases the model can lead to excessive fees charged to users if the
+ specified fee is bigger than needed.
## Specification
@@ -76,6 +81,7 @@ In the following specification, we'll use `StellarValue v` with its associated
No changes in XDR
### Construction of the pair (`txSetBaseFee`, `txSet`)
+
#### Repurposing `ledgerHeader.maxTxSetSize`
For ledgers with a protocolVersion supporting CAP0005, this setting controls
@@ -85,28 +91,29 @@ controls the number of _transactions_ in the transaction set).
#### Updated "surge pricing" algorithm
1. pick a random number S
-2. for each account, construct a queue of transactions for that account,
-sorted by sequence number in ascending order
- * implementation note: there might be an opportunity to consolidate with
-the transaction holding tank.
-3. construct a heap of those queues, sorted by:
- * the fee rate (descending) of the first transaction in each queue (so that the queue
-whose first transaction has the highest fee rate is on top of the heap)
- * first transaction hash Xored with S (ascending)
+2. for each account, construct a queue of transactions for that account, sorted
+ by sequence number in ascending order \* implementation note: there might be
+ an opportunity to consolidate with the transaction holding tank.
+3. construct a heap of those queues, sorted by: _ the fee rate (descending) of
+ the first transaction in each queue (so that the queue whose first
+ transaction has the highest fee rate is on top of the heap) _ first
+ transaction hash Xored with S (ascending)
4. `nb_operations_to_add = maxTxSetSize`
5. while (`nb_operations_to_add > 0 && !heap.empty()`)
- * queue = heap.pop()
- * if nb_operations(first transaction "tx" from queue) <= nb_operations_to_add
- * push tx into txSet
- * nb_operations_to_add = nb_operations_to_add - nb_operations(tx)
- * pop tx from queue
- * if queue non empty, push it into the heap
+ - queue = heap.pop()
+ - if nb_operations(first transaction "tx" from queue) <=
+ nb_operations_to_add
+ - push tx into txSet
+ - nb_operations_to_add = nb_operations_to_add - nb_operations(tx)
+ - pop tx from queue
+ - if queue non empty, push it into the heap
6. if `count_ops(txSet) > max(0, ledgerHeader.maxTxSetSize-100)`
- * surge pricing in effect
+ - surge pricing in effect
NB: if `ledgerHeader.ledgerVersion < CAP_0005.protocolVersion`,
-* step 4 becomes `nb_operations_to_add = maxTxSetSize*100`
-* step 5 defaults `nb_operations(tx)` to `100`
+
+- step 4 becomes `nb_operations_to_add = maxTxSetSize*100`
+- step 5 defaults `nb_operations(tx)` to `100`
### Validity and usage of the effective base fee
@@ -115,23 +122,25 @@ NB: if `ledgerHeader.ledgerVersion < CAP_0005.protocolVersion`,
The effective base fee for protocol supporting CAP_0005 is defined as follows:
1. if `nb_operations(txSet) <= max(0, ledgerHeader.maxTxSetSize-100)`
- * surge pricing not in effect
- * effectiveBaseFee = `ledgerHeader.baseFee`
+ - surge pricing not in effect
+ - effectiveBaseFee = `ledgerHeader.baseFee`
2. else, we use the smallest possible base fee for that transaction set
- * effectiveBaseFee = `min(baseFeeForTx(for(tx in txSet)))`
+ - effectiveBaseFee = `min(baseFeeForTx(for(tx in txSet)))`
Where `baseFeeForTx` is defined as `tx.fee / tx.ops.length()` rounded up.
-NB: Surge pricing is triggered as soon as we can't assume that a transaction was not included in the transaction set given the limit `ledgerHeader.maxTxSetSize`, which comes to `ledgerHeader.maxTxSetSize-100`.
+NB: Surge pricing is triggered as soon as we can't assume that a transaction
+was not included in the transaction set given the limit
+`ledgerHeader.maxTxSetSize`, which comes to `ledgerHeader.maxTxSetSize-100`.
#### Computation of the fee charged per transaction
The computation of `fee = computedFee(tx, effectiveBaseFee)` is done as:
1. if `ledgerHeader.ledgerVersion < CAP_0005.protocolVersion`
- * fee = `tx.fee`
+ - fee = `tx.fee`
2. else
- * fee = `min(tx.fee, effectiveBaseFee*tx.ops.length())`
+ - fee = `min(tx.fee, effectiveBaseFee*tx.ops.length())`
#### Validity of a `txSet`
@@ -139,7 +148,8 @@ From a fee point of view, the only requirement is that each source account has
a balance sufficient to cover the combined computed fees of that account's
transactions included in `txSet`
-if `ledgerHeader.ledgerVersion < CAP_0005.protocolVersion` we enforce that `txSet.length() <= maxTxSetSize`.
+if `ledgerHeader.ledgerVersion < CAP_0005.protocolVersion` we enforce that
+`txSet.length() <= maxTxSetSize`.
Otherwise we enforce that `nb_operations(txSet) <= maxTxSetSize`
@@ -149,14 +159,15 @@ The ballot protocol requires a function to combine candidate values produced by
the nomination protocol.
For older versions of the protocol, we do not change this function, ie we keep
-the transaction set with the highest number of transactions, and break ties
- by comparing the hash of transaction sets (XORed to pseudo-randomize).
+the transaction set with the highest number of transactions, and break ties by
+comparing the hash of transaction sets (XORed to pseudo-randomize).
For newer versions of the protocol, we'll be following a similar scheme by
picking the highest value sorted in order by:
-* highest number of operations
-* highest total fee (sum of fees charged)
-* highest txset hash (XORed by the hash of all value as before)
+
+- highest number of operations
+- highest total fee (sum of fees charged)
+- highest txset hash (XORed by the hash of all value as before)
## Rationale
@@ -186,8 +197,8 @@ as part of their cleanup that doesn't depend on having a predetermined balance.
### Surge pricing implementation
-The exact value generated by surge pricing doesn't need to match even when
-the network is still operating on an older version of the protocol.
+The exact value generated by surge pricing doesn't need to match even when the
+network is still operating on an older version of the protocol.
As such, there is no concern to try to preserve the existing behavior.
@@ -202,13 +213,14 @@ represent transactions but operations instead); A subsequent vote by validators
will be need to be coordinated in order to adjust it to a new desired value.
Practically speaking the disruption should be minimal:
-* Most transactions are single operation transactions and won't be effected.
-* Disruption will be mostly for transactions with more operations than
-`maxTxSetSize`. As of this writing, this network parameter is set to `50` on
-the public network, which means that transactions with more than `50` operations
-will not be accepted by the network between the two upgrades.
-* Validators can coordinate a "double upgrade", where both the protocol version
-and the `maxTxSetSize` field gets updated.
+
+- Most transactions are single operation transactions and won't be affected.
+- Disruption will be mostly for transactions with more operations than
+ `maxTxSetSize`. As of this writing, this network parameter is set to `50` on
+ the public network, which means that transactions with more than `50`
+ operations will not be accepted by the network between the two upgrades.
+- Validators can coordinate a "double upgrade", where both the protocol version
+ and the `maxTxSetSize` field gets updated.
## Test Cases
diff --git a/core/cap-0006.md b/core/cap-0006.md
index ae0097ee1..05b9e3cb8 100644
--- a/core/cap-0006.md
+++ b/core/cap-0006.md
@@ -11,23 +11,26 @@ Protocol version: 11
```
## Simple Summary
+
We introduce the `ManageBuyOffer` operation with functionality similar to the
`ManageOffer` operation except that the amount is specified in terms of the
`buying` asset instead of the `selling` asset.
## Abstract
+
The `ManageOffer` operation specifies the maximum amount of the `selling` asset
that should be sold by the offer. It is not, however, possible to express the
maximum amount of the `buying` asset that should be bought by the offer. These
-constraints are not equivalent because at the time of offer submission it is not
-known at what price the offer will execute. We propose to add a new operation
-called `ManageBuyOffer` which specifies the maximum amount of the `buying` asset
-that should be bought by the offer. The price will be the "price of thing being
-bought in terms of what you are selling" rather than the "price of thing being
-sold in terms of what you are buying". The behavior is otherwise analogous to
-the extant `ManageOffer` operation.
+constraints are not equivalent because at the time of offer submission it is
+not known at what price the offer will execute. We propose to add a new
+operation called `ManageBuyOffer` which specifies the maximum amount of the
+`buying` asset that should be bought by the offer. The price will be the "price
+of thing being bought in terms of what you are selling" rather than the "price
+of thing being sold in terms of what you are buying". The behavior is otherwise
+analogous to the extant `ManageOffer` operation.
## Motivation
+
Many financial institutions have an obligation to faithfully execute customer
orders. Customer orders to sell a certain quantity of an asset in exchange for
the maximum quantity of a different asset are already easily expressed in terms
@@ -38,7 +41,9 @@ are not expressible in terms of the `ManageOffer` operation. We introduce the
order.
## Specification
+
`ManageBuyOfferOp` specification:
+
```c++
struct ManageBuyOfferOp
{
@@ -93,6 +98,7 @@ Name changes are binary compatible, so for better naming consistency:
- `ManageOfferResult` will be renamed to `ManageSellOfferResult`
Additionally, we will update naming for `ManageOfferResultCode` to be
+
```c++
enum ManageSellOfferResultCode
{
@@ -119,6 +125,7 @@ enum ManageSellOfferResultCode
```
Updated `Operation` specification:
+
```c++
enum OperationType
{
@@ -178,79 +185,86 @@ struct Operation
```
## Rationale
-Adding `ManageBuyOffer` will not require modifying what data is contained in the
-ledger. This can be understood by considering offer execution as two distinct
-processes. The first process begins when an offer is submitted. If this offer
-matches against an existing offer, then those offers must execute at the price
-of the existing offer. This repeats until either the submitted offer has
-executed entirely or the submitted offer does not match against any existing
-offer. During this first process, limits on the buying amount are not equivalent
-to limits on the selling amount since the execution price is variable.
+
+Adding `ManageBuyOffer` will not require modifying what data is contained in
+the ledger. This can be understood by considering offer execution as two
+distinct processes. The first process begins when an offer is submitted. If
+this offer matches against an existing offer, then those offers must execute at
+the price of the existing offer. This repeats until either the submitted offer
+has executed entirely or the submitted offer does not match against any
+existing offer. During this first process, limits on the buying amount are not
+equivalent to limits on the selling amount since the execution price is
+variable.
The second process begins with adding to the offer book the remainder of the
-offer submitted at the start of the first process, if that offer was not already
-executed entirely. Any subsequent execution of this offer will occur at the
-price of this offer, unless the offer is modified (in which case the first
+offer submitted at the start of the first process, if that offer was not
+already executed entirely. Any subsequent execution of this offer will occur at
+the price of this offer, unless the offer is modified (in which case the first
process begins anew). Therefore, a limit on the buying amount is equivalent to
a limit on the selling amount during the second process.
At this point, it is clear what the semantics of the `ManageBuyOffer` operation
-should be. During the first process, which is a subset of the apply-phase of the
-operation, the total amount that can be executed is limited by the `buyAmount`
-specified in the `ManageBuyOfferOp`. At the start of the second process, the
-remaining `buyAmount` is converted into a sell amount and stored in the ledger,
-analogous to what would be done with the remaining `amount` at the end of
-`ManageOffer`. The `price` must also be inverted before it is stored in the
-ledger.
+should be. During the first process, which is a subset of the apply-phase of
+the operation, the total amount that can be executed is limited by the
+`buyAmount` specified in the `ManageBuyOfferOp`. At the start of the second
+process, the remaining `buyAmount` is converted into a sell amount and stored
+in the ledger, analogous to what would be done with the remaining `amount` at
+the end of `ManageOffer`. The `price` must also be inverted before it is stored
+in the ledger.
There is, however, one important caveat to all of the above. At the end of the
day, offers are stored on the ledger as sell offers which means that the amount
-is a sell amount. When rounding occurs in favor of an offer, it may receive more
-of the asset that is not limited than would be otherwise expected. During the
-first process, this does not cause an issue for either `ManageBuyOffer` or
-`ManageSellOffer` as each has a limit in terms of the appropriate asset. But, as
-noted, in the ledger all limits are on the selling asset so it is possible for
-`ManageBuyOffer` to buy more than expected during the second process.
+is a sell amount. When rounding occurs in favor of an offer, it may receive
+more of the asset that is not limited than would be otherwise expected. During
+the first process, this does not cause an issue for either `ManageBuyOffer` or
+`ManageSellOffer` as each has a limit in terms of the appropriate asset. But,
+as noted, in the ledger all limits are on the selling asset so it is possible
+for `ManageBuyOffer` to buy more than expected during the second process.
### Why is the price inverted?
+
As noted in the abstract, the price will be the "price of thing being bought in
terms of what you are selling" rather than the "price of thing being sold in
terms of what you are buying". There are three main reasons for this interface:
1. When making a market, `ManageSellOfferOp` and `ManageBuyOfferOp` will have
-prices that appear in the same units:
- - The price in `ManageSellOfferOp{selling=X, buying=Y, amount=A, price=P}`
-is the "price of thing being sold in terms of what you are buying" so it is the
-"price of X in terms of Y"
- - The price in `ManageBuyOfferOp{selling=Y, buying=X, buyAmount=A, price=P}`
-is the "price of thing being bought in terms of what you are selling" so it is
-the "price of X in terms of Y"
+ prices that appear in the same units: - The price in
+ `ManageSellOfferOp{selling=X, buying=Y, amount=A, price=P}` is the "price of
+ thing being sold in terms of what you are buying" so it is the "price of X
+ in terms of Y" - The price in
+ `ManageBuyOfferOp{selling=Y, buying=X, buyAmount=A, price=P}` is the "price
+ of thing being bought in terms of what you are selling" so it is the "price
+ of X in terms of Y"
2. As an extension of (1), if `{selling=X, buying=Y, amount=A, price=P}`
-represents the best offer in a given market, then it can be exactly crossed by
-submitting `ManageBuyOfferOp{selling=Y, buying=X, buyAmount=A, price=P}`
-3. Converting the sell amount in `ManageSellOfferOp` to an equivalent buy amount
-is accomplished by computing `amount * price`; converting the buy amount in
-`ManageBuyOfferOp` to an equivalent sell amount is accomplished by computing
-`buyAmount * price`
+ represents the best offer in a given market, then it can be exactly crossed
+ by submitting `ManageBuyOfferOp{selling=Y, buying=X, buyAmount=A, price=P}`
+3. Converting the sell amount in `ManageSellOfferOp` to an equivalent buy
+ amount is accomplished by computing `amount * price`; converting the buy
+ amount in `ManageBuyOfferOp` to an equivalent sell amount is accomplished by
+ computing `buyAmount * price`
### Validation result codes
+
In implementing `ManageBuyOffer`, it was observed that `ManageOffer` does not
-respect the convention that failure to validate should only return an error code
-labeled `MALFORMED`. `ManageBuyOffer` should respect this convention, and for
-consistency `ManageSellOffer` should also respect this convention starting in
-the protocol version which implements this proposal. Specifically validation of
-`ManageSellOffer` will return `MANAGE_SELL_OFFER_MALFORMED` instead of
+respect the convention that failure to validate should only return an error
+code labeled `MALFORMED`. `ManageBuyOffer` should respect this convention, and
+for consistency `ManageSellOffer` should also respect this convention starting
+in the protocol version which implements this proposal. Specifically validation
+of `ManageSellOffer` will return `MANAGE_SELL_OFFER_MALFORMED` instead of
`MANAGE_SELL_OFFER_NOT_FOUND`.
## Backwards Compatibility
+
This proposal is fully backward compatible.
## Test Cases
+
Some test cases that must be considered include:
-* If `ManageBuyOffer` and `ManageSellOfferOp` have the same max send and max
-receive after crossing offers, then the same offer is added to the ledger
-* `ManageBuyOffer` properly accounts for [liabilities](cap-0003.md)
+- If `ManageBuyOffer` and `ManageSellOfferOp` have the same max send and max
+ receive after crossing offers, then the same offer is added to the ledger
+- `ManageBuyOffer` properly accounts for [liabilities](cap-0003.md)
## Implementation
+
No implementation yet.
diff --git a/core/cap-0007.md b/core/cap-0007.md
index 3d5d1cd20..ce4a19d95 100644
--- a/core/cap-0007.md
+++ b/core/cap-0007.md
@@ -11,31 +11,33 @@ Protocol version: TBD
```
## Simple Summary
+
We introduce CreateDeterministicAccount operation which creates an account that
could only be created by the transaction creating it, allowing it to have a
known starting sequence number and initialization.
## Abstract
+Deterministic accounts are a critical infrastructure piece for building
+reliable contracts for three key reasons:
-Deterministic accounts are a critical infrastructure piece for building reliable contracts for three key reasons:
-
-1) Predictable sequence numbers
-2) Never-fail creation
-3) Known initialized state
+1. Predictable sequence numbers
+2. Never-fail creation
+3. Known initialized state
-We create deterministic accounts with a few changes to account structures to support non-key account IDs.
+We create deterministic accounts with a few changes to account structures to
+support non-key account IDs.
## Motivation
-Currently, when a contract must create an account as a part of it's settlement (such as in Starlight or in an Account Transfer),
-creating the account results in an unpredictable sequence number which prevents the presigning of transactions for it. Furthermore,
-accounts can not be created and modified at the same time preventing initializing an account on behalf of another party.
-
+Currently, when a contract must create an account as a part of its settlement
+(such as in Starlight or in an Account Transfer), creating the account results
+in an unpredictable sequence number which prevents the presigning of
+transactions for it. Furthermore, accounts can not be created and modified at
+the same time preventing initializing an account on behalf of another party.
## Specification
-
We extend the AccountID type:
```c++
@@ -64,9 +66,8 @@ case ACCOUNT_ID_TYPE_REF_NEW_DET_ACCOUNT:
```
-
-
We add the following operation:
+
```c++
struct CreateDeterministicAccountOp
{
@@ -75,6 +76,7 @@ struct CreateDeterministicAccountOp
```
The result of this operation is an account created with the following state:
+
```c++
AccountEntry
{
@@ -99,10 +101,9 @@ Therefore subsequent operations with source account set to
This operation works with Account Merge, but subsequently
`ACCOUNT_ID_TYPE_NEWEST_DET_ACCOUNT` refers to no account.
-
Alternatively, we may store a vector of pointers to the accounts being created
(in the implementation) and refer to them by creation order. In this case, we
-use `ACCOUNT_ID_TYPE_REF_NEW_DET_ACCOUNT`. `which=0` points to the first
+use `ACCOUNT_ID_TYPE_REF_NEW_DET_ACCOUNT`. `which=0` points to the first
account created, `1` the second, etc. `which= = -1` refers to the most recently
created account, `-2` the next most recently created, etc. Only one of
`ACCOUNT_ID_TYPE_REF_NEW_DET_ACCOUNT` or `ACCOUNT_ID_TYPE_NEWEST_DET_ACCOUNT`
@@ -111,44 +112,43 @@ should make it into the final draft of the spec.
The account has no master signer, therefore setting masterWeight has no effect
(but can be used as an extra data field if desired).
-Deterministic accounts may always be merged even if the sequence number is above the
-current ledger, as there is no way to recreate them after merge.
-
-
+Deterministic accounts may always be merged even if the sequence number is
+above the current ledger, as there is no way to recreate them after merge.
## Rationale
-CreateDeterministicAccount is a minimal deterministic account derivation scheme.
+CreateDeterministicAccount is a minimal deterministic account derivation
+scheme.
The newly created account is deterministically derived from
- The transaction creating it, which transitively commits to:
- - The Source Account
- - The Source Account Sequence
-- The index of the operation in the transaction (making a unique key if repeated CreateDeterministicAccount operations)
-
+ - The Source Account
+ - The Source Account Sequence
+- The index of the operation in the transaction (making a unique key if
+ repeated CreateDeterministicAccount operations)
Alternative schemes to this deterministic account proposal derive only on the
source account, sequence number, and operation index rather than the complete
transaction hash. While strictly speaking, this should work for many of the use
cases where deterministic accounts are desired, and does not encumber the
complexity of not knowing the account ID inside the transaction, there are two
-main drawbacks that leade to this approach being superior. The first drawback is
-that there are multiple possible transactions that *could* create the account,
-which makes it possible for a malicious actor to trick another agent in certain
-cases. For example, if a 2 party protocol is proposed by the malicious actor,
-signatures are acquired, but then the malicious actor aborts, and then the
-protocol is restarted with different values, the other party may not recall that
-they have already approved a transaction in the first protocol. The second
-drawback is that because of the multiplicity of generation of this variant of
-deterministic account, the existence of the account does not serve as a
-testament to the success of the operations in the transaction which created the
-account. Such testaments are useful for complex contracts as they can rely on
-state implied by the existence of an account (such as the current signer of
-another account modified in the generating transaction being the same signer as
-a third account). The abscence of these two drawbacks simultaneously makes this
-version of deterministic accounts less useful for attackers and more useful for
-contracting.
+main drawbacks that lead to this approach being superior. The first drawback
+is that there are multiple possible transactions that _could_ create the
+account, which makes it possible for a malicious actor to trick another agent
+in certain cases. For example, if a 2 party protocol is proposed by the
+malicious actor, signatures are acquired, but then the malicious actor aborts,
+and then the protocol is restarted with different values, the other party may
+not recall that they have already approved a transaction in the first protocol.
+The second drawback is that because of the multiplicity of generation of this
+variant of deterministic account, the existence of the account does not serve
+as a testament to the success of the operations in the transaction which
+created the account. Such testaments are useful for complex contracts as they
+can rely on state implied by the existence of an account (such as the current
+signer of another account modified in the generating transaction being the same
+signer as a third account). The absence of these two drawbacks simultaneously
+makes this version of deterministic accounts less useful for attackers and more
+useful for contracting.
An example of using CreateDeterministicAccount is as follows:
@@ -178,54 +178,50 @@ Tx0:
The in-transaction setup guarantees that if the account is created it is in the
desired state.
-
Protocols around using CreateDeterministicAccount will be the subject of a
subsequent SEP.
-
-Because of the hash cycle (account ID is transaction hash, transaction therefore
-can't include it), pre-auth transactions will not work immediately for
-Deterministic Accounts as described. A subsequent CAP will propose allowing a
-preauth transaction to specify _self_ as the source account for a pre-auth, which
-relaxes this constraint.
+Because of the hash cycle (account ID is transaction hash, transaction
+therefore can't include it), pre-auth transactions will not work immediately
+for Deterministic Accounts as described. A subsequent CAP will propose allowing
+a preauth transaction to specify _self_ as the source account for a pre-auth,
+which relaxes this constraint.
The newest account account ID type is of limited use. There are certain cases
that it does not work for (such as making two deterministic accounts which
approve trust lines for each other). The alternative form adds more complexity
and bookkeeping cost, but provides a simpler API for implementers and supports
-all possible initializations. If the added complexity is not too much, it should
-be preferred. The ability to refer by negative indexes is useful for defining
-templates which should nest inside of other transactions without need to
-recompute indexes.
+all possible initializations. If the added complexity is not too much, it
+should be preferred. The ability to refer by negative indexes is useful for
+defining templates which should nest inside of other transactions without need
+to recompute indexes.
The decision to not include the master signer is a trade off. On the one hand,
it means deterministic accounts cannot include as many public key signers as
non-deterministic accounts. On the other hand 20 signers is likely enough for
most use cases, and the lack of the master key makes account handling code
simpler. MuSig based signature constructions also support constructing larger
-multisig circuits as well, so it is unclear how much utility is gained by having
-one more signer. Another field for a signer could be added to deterministic
-accounts, but this would bloat the account struct further and would break
-compatibility with older software. Potentially, the 20 signer limit could be
-changed to 21 and run time checked (somehow) to only be allowed for
+multisig circuits as well, so it is unclear how much utility is gained by
+having one more signer. Another field for a signer could be added to
+deterministic accounts, but this would bloat the account struct further and
+would break compatibility with older software. Potentially, the 20 signer limit
+could be changed to 21 and run time checked (somehow) to only be allowed for
deterministic accounts. This would allow SDK authors to provide identical
interfaces. If this is needed, it should be the subject of a separate CAP
finalized in conjunction with this CAP. The other ergonomics issue is that it
might be surprising to a implementer that there is no default signer and might
-result in them creating accounts in a frozen state. This is desirable however --
-for many types of smart contract, there is no such concept as a master signer
- (just a few determinisitc steps) and for issuing fixed supply or fixed
- schedule tokens you would only want to use pre signed transactions and a
- master signer would be superflous.
-
-
+result in them creating accounts in a frozen state. This is desirable however
+-- for many types of smart contract, there is no such concept as a master
+signer (just a few deterministic steps) and for issuing fixed supply or fixed
+schedule tokens you would only want to use pre signed transactions and a master
+signer would be superfluous.
## Backwards Compatibility
-This proposal requires updating existing APIs to accept the new key types.
+This proposal requires updating existing APIs to accept the new key types.
Future work should clean up the XDR AccountID typedef to be an enum.
-
## Implementation
+
No implementation yet.
diff --git a/core/cap-0008.md b/core/cap-0008.md
index a39a2db8d..399f395b5 100644
--- a/core/cap-0008.md
+++ b/core/cap-0008.md
@@ -12,7 +12,8 @@ Protocol version: TBD
## Simple Summary
-We introduce a new variant of Preauthorized Transaction which refers to the account it resides on automatically.
+We introduce a new variant of Preauthorized Transaction which refers to the
+account it resides on automatically.
## Abstract
@@ -23,8 +24,6 @@ yet known.
Self Identified Pre-Auth Transactions address this issue by allowing the source
ID to be a special value.
-
-
## Motivation
Adding Pre-Auth transactions to Deterministic accounts enables the creation of
@@ -35,10 +34,8 @@ increasing the throughput of channels created per second, and decreasing the
code complexity of such applications as such automata can be guaranteed to
succeed and not partially fail.
-
## Specification
-
We assume CAP-0007 is accepted.
We extend the AccountID type:
@@ -106,7 +103,6 @@ case SIGNER_KEY_TYPE_HASH_X:
```
-
`ACCOUNT_ID_TYPE_SELF` may be used generally in the body of any transaction --
not just pre-auth -- to refer to the source of a transaction (not the
operation).
@@ -116,24 +112,23 @@ signatures and there are `SIGNER_KEY_TYPE_PRE_AUTH_SELF_TX` entries, should set
the source account to `ACCOUNT_ID_TYPE_SELF` and hash it and see if there are
any matches, and treat them like a pre-auth transaction is treated otherwise.
-
## Rationale
This enables the adding of pre-auth transactions to deterministic accounts at
the time of creation.
-It also enables a general purpose shorthand for referring to the source account,
-which makes implementing certain contracts simpler (as there is no need to
-traverse the transaction and fill in all templated locations for self). This
-makes invoice creation simpler.
+It also enables a general purpose shorthand for referring to the source
+account, which makes implementing certain contracts simpler (as there is no
+need to traverse the transaction and fill in all templated locations for self).
+This makes invoice creation simpler.
An alternative approach could template more than just the source account, such
-as sequences and other fields. Such templates should be considered independently
-of this change even at the cost of potential redundancy because this feature is
-limited in scope and complexity and doesn't introduce transaction malleability
-issues. In a sense, it is not even a templating feature as it can't be used for
-anything except a self-reference -- it is purely a feature to address a hash
-cycle issue.
+as sequences and other fields. Such templates should be considered
+independently of this change even at the cost of potential redundancy because
+this feature is limited in scope and complexity and doesn't introduce
+transaction malleability issues. In a sense, it is not even a templating
+feature as it can't be used for anything except a self-reference -- it is
+purely a feature to address a hash cycle issue.
Before this proposal is finalized, it should be decided if
`SIGNER_KEY_TYPE_PRE_AUTH_SELF_TX` should be allowed to be used on non
@@ -142,14 +137,17 @@ regular accounts, but perhaps it simplifies downstream software if they do not
need to differentiate between normal and deterministic accounts.
It's also possible to not introduce a new type of pre-auth for this change, and
-just to overload the existing pre-auth to match. However, this makes preauth checking
-more expensive in many cases (because the hash needs to be recomputed with the self key).
-Furthermore, it is more explicit to use a new signer type, which makes it easier
-for spendability solvers to analyze pre-auths of both variants for pattern matching.
+just to overload the existing pre-auth to match. However, this makes preauth
+checking more expensive in many cases (because the hash needs to be recomputed
+with the self key). Furthermore, it is more explicit to use a new signer type,
+which makes it easier for spendability solvers to analyze pre-auths of both
+variants for pattern matching.
## Backwards Compatibility
+
This proposal requires updating existing APIs to accept the new key types and
updating siganture validation to handle the new pre-auth type.
## Implementation
+
No implementation yet.
diff --git a/core/cap-0009.md b/core/cap-0009.md
index f0b41b3e1..98ccf25c8 100644
--- a/core/cap-0009.md
+++ b/core/cap-0009.md
@@ -14,25 +14,25 @@ Protocol version: TBD
When we are aspiring to merge an account and remove trustlines, we need to get
our account in a predictable state. This CAP introduces Linear Accounts (or
-Exterior Immutable Accounts) which restricts an account to only allow transactions where
-the account is the source to modify the account.
+Exterior Immutable Accounts) which restricts an account to only allow
+transactions where the account is the source to modify the account.
## Abstract
Sometimes we want to be sure of exactly what the state of an account is when we
are generating certain operations: we want to know exactly how much balance
should be available, we want to know that no sequence bumps can occur
-externally, etc. Currently it's difficult to ensure this.
+externally, etc. Currently it's difficult to ensure this.
Linear Accounts are a sort of "critical section" mode for an account which
prevents outside operations from taking place.
## Motivation
-Currently, when you want to merge an account you send all of the available lumen
-balance to the merge recipient -- but what if a new payment just came in!
-They'll get more money than expected! This is ok sometimes, but usually is a big
-problem!
+Currently, when you want to merge an account you send all of the available
+lumen balance to the merge recipient -- but what if a new payment just came in!
+They'll get more money than expected! This is ok sometimes, but usually is a
+big problem!
This problem is even worse for trustlines. Trustlines have a limit. In order to
remove a trustline it must have no assets in it, otherwise it will fail. If a
@@ -44,7 +44,6 @@ to your account.
This prevents merging because all trustlines must be removed to merge.
-
## Specification
For Linear accounts, we introduce three new flags, `AUTH_EXTERIOR_IMMUTABLE`,
@@ -52,38 +51,37 @@ For Linear accounts, we introduce three new flags, `AUTH_EXTERIOR_IMMUTABLE`,
If `AUTH_EXTERIOR_IMMUTABLE` is set, then transactions which attempt to modify
the state of the account in any way are either invalid or fail (determined by
-`AUTH_EXTERIOR_IMMUTABLE_ACTION` -- set : fail, unset : invalid), unless the modification occurs as a result of
-an offer executing or a transaction with the account as the source.
-
-If `AUTH_PARTIAL_LINEAR` is set, then operations which modify the account simply
-encumber an additional signature obligation on behalf of the account being
-modified. This signature is unique in that if a EdDSA signature, it signs `SHA256(AUTH_PARTIAL_LINEAR || TX Hash || Account
-Modified)`. Such signatures replace signatures from the same key already on the
-transaction if present, and must come after all regular signatures in the
-DecoratedSignature list.
+`AUTH_EXTERIOR_IMMUTABLE_ACTION` -- set : fail, unset : invalid), unless the
+modification occurs as a result of an offer executing or a transaction with the
+account as the source.
-If `AUTH_IMMUTABLE` is set, then the flags may not be changed (continues to work
-as before). `AUTH_EXTERIOR_IMMUTABLE`, `AUTH_EXTERIOR_IMMUTABLE_ACTION`, and
-`AUTH_PARTIAL_LINEAR` may be set when `AUTH_IMMUTABLE` is set.
+If `AUTH_PARTIAL_LINEAR` is set, then operations which modify the account
+simply encumber an additional signature obligation on behalf of the account
+being modified. This signature is unique in that if a EdDSA signature, it signs
+`SHA256(AUTH_PARTIAL_LINEAR || TX Hash || Account Modified)`. Such signatures
+replace signatures from the same key already on the transaction if present, and
+must come after all regular signatures in the DecoratedSignature list.
+If `AUTH_IMMUTABLE` is set, then the flags may not be changed (continues to
+work as before). `AUTH_EXTERIOR_IMMUTABLE`, `AUTH_EXTERIOR_IMMUTABLE_ACTION`,
+and `AUTH_PARTIAL_LINEAR` may be set when `AUTH_IMMUTABLE` is set.
## Rationale
This design accomplishes several orthogonal goals:
1. Temporary suspensions of mutability (achieved via
- `AUTH_EXTERIOR_IMMUTABLE_ACTION=invalid`) which is assumed to
- quickly resume. This allowss for whatever critical section to be handled
- without needing to re-sign transactions
+ `AUTH_EXTERIOR_IMMUTABLE_ACTION=invalid`) which is assumed to quickly
+ resume. This allows for whatever critical section to be handled without
+ needing to re-sign transactions
1. Permanent suspensions of mutability (achieved via
- `AUTH_EXTERIOR_IMMUTABLE_ACTION=fail`) which permanently
- prevents the transaction from succeeding.
+ `AUTH_EXTERIOR_IMMUTABLE_ACTION=fail`) which permanently prevents the
+ transaction from succeeding.
1. Receiver-approved transactions via `AUTH_PARTIAL_LINEAR`/not invalidating
things which have already been signed via a pre-signed transaction.
-1. Provides a mechanism (via `AUTH_IMMUTABLE`) to prove that this will *never*
+1. Provides a mechanism (via `AUTH_IMMUTABLE`) to prove that this will _never_
be enabled.
-
The EdDSA signatures are special cased to include the account they are signing
on to prevent the 'confused deputy' from accidentally authorizing a transaction
linearization where it was not desired. It is not particularly expensive given
@@ -91,19 +89,17 @@ the mandate that 'confused deputy safe' signatures come after regular
signatures. If a future CAP eliminates confused deputy issue, then these
signatures shall still be required to explicitly approve the partial linearity.
-
## Backwards Compatibility
-At first glance, it may seem like this introduces many new ways for transactions
-to fail or be invalid. But it does not, because simply merging an account has
-the same types of effects -- if the source account of an operation doesn't
-exist, then the transaction is invalid, and if the destination of an operation
-doesn't exist, then the transaction fails.
+At first glance, it may seem like this introduces many new ways for
+transactions to fail or be invalid. But it does not, because simply merging an
+account has the same types of effects -- if the source account of an operation
+doesn't exist, then the transaction is invalid, and if the destination of an
+operation doesn't exist, then the transaction fails.
Therefore this doesn't materially impact the status quo, other than making it
possible/easier for developers to control accounts.
-
## Implementation
None yet.
diff --git a/core/cap-0010.md b/core/cap-0010.md
index b809a2c7e..d1c5364ca 100644
--- a/core/cap-0010.md
+++ b/core/cap-0010.md
@@ -13,30 +13,30 @@ Protocol version: TBD
## Simple Summary
Transactions get stuck because of insufficient fee sometimes, especially if the
-protocol changes!
+protocol changes!
We partially address this with fee-bumping account.
## Abstract
-Transactions get stuck either because of insufficient fees being paid or because
-min fees increase.
+Transactions get stuck either because of insufficient fees being paid or
+because min fees increase.
CAP-10 specifies a 64 bit fee-only account (denominated in basefees) which can
be used to bump subsequent transactions from the account.
+
## Motivation
In protocols which transactions are presigned or preauth for long durations,
it's a major headache if they can't get in because of fee insuficiency. CAP-05
-helps with this issue a bit, but doesn't fully address the issue for when things
-are truly stuck because they are now invalid.
-
+helps with this issue a bit, but doesn't fully address the issue for when
+things are truly stuck because they are now invalid.
## Specification
-
We extend the Account with a `fee_balance` account. `fee_balance` is a receive
only account.
+
```c++
struct AccountEntry
{
@@ -78,11 +78,11 @@ Transactions should not be rejected for insufficient fee before querying to see
if a `fee_balance` exists and might cover the transaction.
When a transaction executes for a given source account, it bids a fee of `fee`.
-In this case, the fee is first charged to the account and then any excess to the
+In this case, the fee is first charged to the account and then any excess to
+the `fee_balance`. If the `fee` is insufficient for a given ledger, it then
+bids a fee of `fee + fee_balance`. In this case, an amount `fee` paid is first
+charged against the account, and then any excess is charged against the
`fee_balance`.
-If the `fee` is insufficient for a given ledger, it then bids a fee of `fee +
-fee_balance`. In this case, an amount `fee` paid is first charged against the
-account, and then any excess is charged against the `fee_balance`.
On merge, the fees are forwarded to the `fee_balance` of the merge target.
@@ -173,38 +173,35 @@ The semantics of which are as follows:
If the account does not exist, nothing happens.
-If the account sequence number is greater than `unless_passed`, nothing happens.
+If the account sequence number is greater than `unless_passed`, nothing
+happens.
Otherwise, `min(op.amount, op.source.balance - op.source.reserved)` is deducted
from the source account and added to the destination's `fee_balance`.
-
-
## Rationale
-It's not too expensive to add new entries to accounts, so we add a full 64 bits.
-Alternatively, we could store a separate table for accounts with a
+It's not too expensive to add new entries to accounts, so we add a full 64
+bits. Alternatively, we could store a separate table for accounts with a
`fee_balance` as a subentry, but this is an implementer's choice.
-
-We don't allow to recover the funds from the `fee_balance`, but we do allow them
-to transfer during merge. This is a sort of "fee monad", which prevents
+We don't allow to recover the funds from the `fee_balance`, but we do allow
+them to transfer during merge. This is a sort of "fee monad", which prevents
malapropriation of fee subsidies paid by third parties.
We asess fees against the account's balance first because the `fee_balance`
should only be touched in cases where the balance was insufficient to cover the
-expressed transaction fee, or the expressed transaction fee was insufficient for
-inclusion.
+expressed transaction fee, or the expressed transaction fee was insufficient
+for inclusion.
## Backwards Compatibility
+Transactions which don't pay base fee rate must now query to see if a
+fee_balance exists rather than being outright invalid.
-Transactions which don't pay base fee rate must now query to see if a fee_balance
-exists rather than being outright invalid.
-
-Previously, a user might expect that transactions signed with 0 fee would not be
-valid. Under this proposal, they may be. This doesn't really change the status
-quo because users are not given a guarantee about transaction fees never
+Previously, a user might expect that transactions signed with 0 fee would not
+be valid. Under this proposal, they may be. This doesn't really change the
+status quo because users are not given a guarantee about transaction fees never
decreasing.
## Implementation
diff --git a/core/cap-0011.md b/core/cap-0011.md
index df4babc2e..626fc197b 100644
--- a/core/cap-0011.md
+++ b/core/cap-0011.md
@@ -12,34 +12,36 @@ Protocol version: TBD
## Simple Summary
-This CAP introduces a locking signer which allows for relative time locks of accounts.
+This CAP introduces a locking signer which allows for relative time locks of
+accounts.
## Abstract
-In stellar, transactions are allowed to condition their validity based on time. However,
-because time is defined to be absolute, and the time at which events are confirmed into the
-ledger are non-deterministic, this makes it difficult to reason about the scheduling of events
-in complex contracts. Furthermore, transaction validity ranges don't exclude the possibility
-of another transaction with a different time range being availabke,
+In Stellar, transactions are allowed to condition their validity based on time.
+However, because time is defined to be absolute, and the time at which events
+are confirmed into the ledger are non-deterministic, this makes it difficult to
+reason about the scheduling of events in complex contracts. Furthermore,
+transaction validity ranges don't exclude the possibility of another
+transaction with a different time range being available.
-
-To address this partially, we introduce the notion of a time lock at the account
-level which may be absolute or relative. This construct provides flexibility for
-contract designers to sequence time based events easily and circumvent the need
-for periodic updates in time based protocols.
+To address this partially, we introduce the notion of a time lock at the
+account level which may be absolute or relative. This construct provides
+flexibility for contract designers to sequence time based events easily and
+circumvent the need for periodic updates in time based protocols.
## Motivation
Currently, transactions may specify a valid after and valid before date range.
-However, this range is absolute so it does not permit protocols that specify for
-some action to happen *after* another action without knowledge of the exact
-timing. This makes writing channel protocols difficult as they typically require
-periodic updates. See
+However, this range is absolute so it does not permit protocols that specify
+for some action to happen _after_ another action without knowledge of the exact
+timing. This makes writing channel protocols difficult as they typically
+require periodic updates. See
[here](https://www.stellar.org/blog/lightning-on-stellar-roadmap/).
A relative timeout signer
## Specification
+
```c++
enum SignerKeyType
{
@@ -85,15 +87,15 @@ case SIGNER_KEY_TYPE_TIMEOUT:
When a timeout key is added to an account, the accounts seuence number may not
be increased until the current ledger sequence and most recent close time are
both greater than or equal to the times specified in the
-`SIGNER_KEY_TYPE_TIMEOUT`. The `actual_` fields may be set to an arbitrary value
-and are filled in upon receipt of the relative locktime with the actual computed
-time. The `original_` fields are set to the `actual_` fields value before they are modified. Transactions where the account is not the source may still succeed while
-the timeout is present unless otherwise specified.
-
+`SIGNER_KEY_TYPE_TIMEOUT`. The `actual_` fields may be set to an arbitrary
+value and are filled in upon receipt of the relative locktime with the actual
+computed time. The `original_` fields are set to the `actual_` fields value
+before they are modified. Transactions where the account is not the source may
+still succeed while the timeout is present unless otherwise specified.
If the relative seq and time flags are set then when the entry is added the
timeout is computed relative to the execution time of the transaction adding
-it. For example, were it to be 100 ledgers relatively and it was added at
+it. For example, were it to be 100 ledgers relatively and it was added at
ledger 64534, then the next sequence adjusting transaction could happen at
ledger 64634.
@@ -101,37 +103,33 @@ If the REMOVABLE flag is set, then the timeout may be removed by a transaction
which has a different source account. If it is not set, the timeout may not be
removed until it has matured.
-If `NO_AUTOREMOVE` is set, then the key does not delete itself automatically off
-of the account once satisfied. If it is unset, the signer removes itself
-automatically once satisfied. To manually remove a relative signer, it should be
-referred to with the `original_` fields set to the `acutal_` value it was set to be
-when added.
-
-If `ONLY_BUMPSEQ` is set, then the only operation that may use the account as a source
-is a BUMPSEQ.
-
-If `NO_BUMPSEQ` is set, then BUMPSEQ may not use the account as a source of an operation.
-
-If `ALLOW_MID` or `ALLOW_HI` are set, then the timeout may be overrided by a MID
-or HIGH threshold set of signers. `ALLOW_LOW` option is provided even though in *most*
-cases it is useless. However, if the account is set to have signers below the threshold,
-and a contract path has the effect of decreasing the threshold, perhaps it is desired to
-then ignore the timeout.
-
+If `NO_AUTOREMOVE` is set, then the key does not delete itself automatically
+off of the account once satisfied. If it is unset, the signer removes itself
+automatically once satisfied. To manually remove a relative signer, it should
+be referred to with the `original_` fields set to the `actual_` value it was
+set to be when added.
+If `ONLY_BUMPSEQ` is set, then the only operation that may use the account as a
+source is a BUMPSEQ.
+If `NO_BUMPSEQ` is set, then BUMPSEQ may not use the account as a source of an
+operation.
+If `ALLOW_MID` or `ALLOW_HI` are set, then the timeout may be overridden by a
+MID or HIGH threshold set of signers. `ALLOW_LOW` option is provided even
+though in _most_ cases it is useless. However, if the account is set to have
+signers below the threshold, and a contract path has the effect of decreasing
+the threshold, perhaps it is desired to then ignore the timeout.
## Rationale
-
This design covers the various use cases discussed for timeouts.
For example, in a ratchet account for starlight channels, you Bump Sequence and
then add a relative timeout of the desired period. This gives the counterparty
-sufficient time to come online to counterclaim. The relative timeout removes the
-need to periodically sign update transactions. This improves the security and
-simplicity of channel implementations.
+sufficient time to come online to counterclaim. The relative timeout removes
+the need to periodically sign update transactions. This improves the security
+and simplicity of channel implementations.
Strictly speaking, for this use case we minimally need the relative timeout
version of only one time field (ledger sequence or time), and do not need the
@@ -140,15 +138,14 @@ signer reserves reserved until the end of the protocol.
If allowed to be set though, the other options allow for a more flexible model
for contracting with timeouts between when certain operations may take place.
-BUMPSEQ is exempted by default if the account should be restricted for outside
+BUMPSEQ is exempted by default if the account should be restricted for outside
operations, given that it's standard use is for timeout based protocols.
-
The complexity around the swapping of the `original_` and `actual_` values
-allows for a contract designer to have a time invariant identifier for identical
-relative timeouts added at different times. Without this, removing timeouts
-could only be done by transactions which are signed after observing the relative
-times computed.
+allows for a contract designer to have a time invariant identifier for
+identical relative timeouts added at different times. Without this, removing
+timeouts could only be done by transactions which are signed after observing
+the relative times computed.
## Backwards Compatibility
@@ -156,8 +153,9 @@ The signer is 32 bytes so should be compatible.
Old software must be able to understand when an account is locked.
-Technically, this change is equivalent to removing keys from an account from the perspective of another account.
+Technically, this change is equivalent to removing keys from an account from
+the perspective of another account.
+
## Implementation
None yet.
-
diff --git a/core/cap-0012.md b/core/cap-0012.md
index 684a34b8d..ab309d595 100644
--- a/core/cap-0012.md
+++ b/core/cap-0012.md
@@ -12,18 +12,17 @@ Protocol version: TBD
## Simple Summary
-Allow an account to be created with deterministic sequence numbers and
-proofs of the creating transaction.
+Allow an account to be created with deterministic sequence numbers and proofs
+of the creating transaction.
## Abstract
-Allow a new type of account to be created whose name is a
-deterministic function of the source account and sequence number of
-the creating transaction and whose sequence numbers are deterministic.
-Such deterministic accounts also contain a creation time and the
-transaction id of the creating account, allowing transactions to
-verify that an account was created at least some time in the past and
-that it was created by a specific account.
+Allow a new type of account to be created whose name is a deterministic
+function of the source account and sequence number of the creating transaction
+and whose sequence numbers are deterministic. Such deterministic accounts also
+contain a creation time and the transaction id of the creating account,
+allowing transactions to verify that an account was created at least some time
+in the past and that it was created by a specific account.
## Motivation
@@ -33,35 +32,33 @@ The goal is to enable several usage patterns:
- Sign transactions on an account that doesn't exist yet.
-- Allow arbitrarily nested pre-auth transactions that create accounts
- that have pre-auth transactions that create accounts and so forth.
+- Allow arbitrarily nested pre-auth transactions that create accounts that have
+ pre-auth transactions that create accounts and so forth.
-- If two operations in the same transaction both have such nested
- accounts with pre-auth transactions, the most deeply nested accounts
- resulting from the two operations should be able to reference each
- other's issued assets.
+- If two operations in the same transaction both have such nested accounts with
+ pre-auth transactions, the most deeply nested accounts resulting from the two
+ operations should be able to reference each other's issued assets.
-- Require that one disclose the intent to execute a transaction some
- minimum time before actually executing the transaction.
+- Require that one disclose the intent to execute a transaction some minimum
+ time before actually executing the transaction.
-- Pay the fee for another transaction if the original transaction's
- fee is too low.
+- Pay the fee for another transaction if the original transaction's fee is too
+ low.
## Specification
### Deterministic accounts
-There are now two ways to create an account: the original
-`CREATE_ACCOUNT` operation and a new `CREATE_DET_ACCOUNT` that creates
-an account whose sequence number is deterministically initialized to
-0x100000000 (2^{32}). A deterministically-created account has the
-current transaction automatically added as a pre-auth transaction,
-allowing the current transaction to add signers and otherwise
-manipulate options on the account. The public key specified in the
-account creation operation also gets added to the newly created
-account with signer weight 255.
-
-~~~ {.c}
+There are now two ways to create an account: the original `CREATE_ACCOUNT`
+operation and a new `CREATE_DET_ACCOUNT` that creates an account whose sequence
+number is deterministically initialized to 0x100000000 (2^{32}). A
+deterministically-created account has the current transaction automatically
+added as a pre-auth transaction, allowing the current transaction to add
+signers and otherwise manipulate options on the account. The public key
+specified in the account creation operation also gets added to the newly
+created account with signer weight 255.
+
+```{.c}
enum OperationType
{
/* ... */
@@ -110,16 +107,16 @@ case CREATE_ACCOUNT_SUCCESS:
default:
void;
};
-~~~
+```
### Modifications to `AccountID`
-There are now two account types, depending on how the account was
-created. To simplify the XDR, we also propose merging the public key,
-account type, and signer type constants into a single `enum`, since it
-will be convenient to keep the constants distinct.
+There are now two account types, depending on how the account was created. To
+simplify the XDR, we also propose merging the public key, account type, and
+signer type constants into a single `enum`, since it will be convenient to keep
+the constants distinct.
-~~~ {.c}
+```{.c}
enum AccountOrSigner {
// A Key can name a signer or an account (or both)
KEY_ED25519 = 0,
@@ -146,20 +143,19 @@ struct CreatorSeqPayload {
SequenceNumber seqNum; // Sequence number of tx creating account
unsigned opIndex; // Index of operation that created account
};
-~~~
+```
### Changes to `AccountEntry`
-Each newly created account now contains two extra pieces of
-information:
+Each newly created account now contains two extra pieces of information:
-* The transaction ID of the transaction that created the account (a
- hash of `TransactionSignaturePayload` for that transaction), and
+- The transaction ID of the transaction that created the account (a hash of
+ `TransactionSignaturePayload` for that transaction), and
-* The creation time of the account (`closeTime` from the ledger in
- which the creation transaction ran).
+- The creation time of the account (`closeTime` from the ledger in which the
+ creation transaction ran).
-~~~ {.c}
+```{.c}
struct AccountEntry {
/* ... */
// reserved for future use
@@ -183,16 +179,16 @@ struct AccountEntry {
}
ext;
};
-~~~
+```
### `CHECK_ACCOUNT`
-A new `CHECK_ACCOUNT` operation has no side effects, but is invalid if
-the source account does not exist or does not meet certain criteria.
-The `CHECK_ACCOUNT` operation does not require a signature from the
-operation's source account.
+A new `CHECK_ACCOUNT` operation has no side effects, but is invalid if the
+source account does not exist or does not meet certain criteria. The
+`CHECK_ACCOUNT` operation does not require a signature from the operation's
+source account.
-~~~ {.c}
+```{.c}
enum AccountConditionType {
ACC_MIN_AGE = 1,
ACC_CREATOR = 2,
@@ -216,118 +212,111 @@ struct AccountCondition switch (AccountConditionType type) {
typedef AccountConditionType CheckAccountOp<2>;
// Returns void, since it can never fail
-~~~
-
-Note that `CHECK_ACCOUNT` affects the validity of a transaction. In
-particular, a transaction is always invalid if the `sourceAccount` of
-a `CHECK_ACCOUNT` operation does not exist or does not satisfy the
-specified conditions at the time of transaction validation. Note,
-however, that this is different from guaranteeing that `CHECK_ACCOUNT`
-never fails. In particular, a set of transactions could be ordered so
-as to delete or modify the `sourceAccount`, making a previously valid
-`CHECK_ACCOUNT` operation fail. In that case the enclosing
-transaction will fail, consuming a fee and sequence number.
-
-Higher-level protocols may depend on a transaction with a
-`CHECK_ACCOUNT` operation not failing. To ensure the operation does
-not fail, such a protocol must ensure monotonicity of the
-conditions--in other words, an untrustworthy party may have the power
-to make the condition true (rendering the transaction valid), but must
-not subsequently have the power to make the condition false.
-
-If transaction _C_ refers to transaction _P_ using an `ACC_SEQ_MIN`
-condition, and _C_'s sequence number is one less than `seqMin`, then
-any extra fees in _C_ can contribute to executing _P_ if _P_ does not
-have a sufficient `fee`. This solves the problem of insufficient fees
-on a transaction that cannot be resigned.
+```
+
+Note that `CHECK_ACCOUNT` affects the validity of a transaction. In particular,
+a transaction is always invalid if the `sourceAccount` of a `CHECK_ACCOUNT`
+operation does not exist or does not satisfy the specified conditions at the
+time of transaction validation. Note, however, that this is different from
+guaranteeing that `CHECK_ACCOUNT` never fails. In particular, a set of
+transactions could be ordered so as to delete or modify the `sourceAccount`,
+making a previously valid `CHECK_ACCOUNT` operation fail. In that case the
+enclosing transaction will fail, consuming a fee and sequence number.
+
+Higher-level protocols may depend on a transaction with a `CHECK_ACCOUNT`
+operation not failing. To ensure the operation does not fail, such a protocol
+must ensure monotonicity of the conditions--in other words, an untrustworthy
+party may have the power to make the condition true (rendering the transaction
+valid), but must not subsequently have the power to make the condition false.
+
+If transaction _C_ refers to transaction _P_ using an `ACC_SEQ_MIN` condition,
+and _C_'s sequence number is one less than `seqMin`, then any extra fees in _C_
+can contribute to executing _P_ if _P_ does not have a sufficient `fee`. This
+solves the problem of insufficient fees on a transaction that cannot be
+resigned.
## Rationale
-These mechanisms solve a bunch of issues that come up in the context
-of payment channels. Because there are competing proposals already
-(CAP-0007 and CAP-0008), this document adopts the rationale of those
-documents by reference unless and until the protocol working group
-decides to move forward with account aliases.
+These mechanisms solve a bunch of issues that come up in the context of payment
+channels. Because there are competing proposals already (CAP-0007 and
+CAP-0008), this document adopts the rationale of those documents by reference
+unless and until the protocol working group decides to move forward with
+account aliases.
## Backwards Compatibility
-The data structures are all backwards compatible. However, the author
-suggests moving keys, account IDs and account aliases into a single
-namespace, namely the `AccountOrSigner` enum. There's nothing wrong
-with having unions that don't allow every value in an enum. By
-contrast, it will get confusing if we use multiple enums and try to
-keep all of their values in sync.
+The data structures are all backwards compatible. However, the author suggests
+moving keys, account IDs and account aliases into a single namespace, namely
+the `AccountOrSigner` enum. There's nothing wrong with having unions that don't
+allow every value in an enum. By contrast, it will get confusing if we use
+multiple enums and try to keep all of their values in sync.
## Example
-Consider a payment channel funded by an initial transaction TI, and
-intended to be closed by the last in a series of transactions T_1,
-T_2, ..., T_n. The channel will also involve a special declaration
-transaction TD that can be executed by any participant who wants to
-close the channel unilaterally. The T_i transactions can only be
-executed some time (e.g., 24 hours) after TD has been executed.
-Finally, for each T_i and user u, the participants sign a revocation
-transaction RT_{u,i} that allows user u to invalidate all T_j with j <
-i.
+Consider a payment channel funded by an initial transaction TI, and intended to
+be closed by the last in a series of transactions T\_1, T_2, ..., T_n. The
+channel will also involve a special declaration transaction TD that can be
+executed by any participant who wants to close the channel unilaterally. The
+T_i transactions can only be executed some time (e.g., 24 hours) after TD has
+been executed. Finally, for each T_i and user u, the participants sign a
+revocation transaction RT\_{u,i} that allows user u to invalidate all T_j with j
+< i.
-The participants first create TI, but do not sign it. Then they
-create, sign, and exchange TD, T_1, and RT_{u,1} for all u. Finally,
-the users sign and submit TI to place funds in escrow and make TD
-valid to submit.
+The participants first create TI, but do not sign it. Then they create, sign,
+and exchange TD, T\_1, and RT\_{u,1} for all u. Finally, the users sign and
+submit TI to place funds in escrow and make TD valid to submit.
-From this point forward, at each step i, the participants create and
-sign T_i, and after obtaining a signed T_i, sign R_{u,i} for each user
-u.
+From this point forward, at each step i, the participants create and sign T\_i,
+and after obtaining a signed T_i, sign R\_{u,i} for each user u.
-To close the channel unilaterally when T_i is the latest transaction,
-user u submits TD and R_{u,i}. Should any user u' believe T_j is
-valid for j>i, that user submits RT_{u',j} so as to invalidate T_i.
-Finally, 24 hours after TD has executed, any user can submit T_i (or
-T_j if j > i).
+To close the channel unilaterally when T\_i is the latest transaction, user u
+submits TD and R\_{u,i}. Should any user u' believe T\_j is valid for j>i, that
+user submits RT\_{u',j} so as to invalidate T_i. Finally, 24 hours after TD has
+executed, any user can submit T_i (or T_j if j > i).
-In constructing transactions for the channel, we will use the
-following accounts:
+In constructing transactions for the channel, we will use the following
+accounts:
-* D -- a declaration account, deterministically created from F, whose
- existence declares that some user has intent to close the channel.
+- D -- a declaration account, deterministically created from F, whose existence
+ declares that some user has intent to close the channel.
-* E -- an escrow account deterministically created by TI, with n-of-n
- multisig for all parties.
+- E -- an escrow account deterministically created by TI, with n-of-n multisig
+ for all parties.
-* F -- a fee account, also created by TI, also with n-of-n multisig
- for all parties, containing only as many XLM as channel owners are
- willing to pay to execute a transaction.
+- F -- a fee account, also created by TI, also with n-of-n multisig for all
+ parties, containing only as many XLM as channel owners are willing to pay to
+ execute a transaction.
The transactions are then constructed as follows:
-* TI deterministically creates and funds E and F.
-
-* TD has source account F, sequence number 2^{32}+1, and a very high
- fee (so that in the event of rising fees, any user can add funds to
- F to make TD go through). It has the following operations:
- - Deterministically create account D
- - Move enough XLM from E to F for another transaction
-
-* T_i has source account F and sequence number 2^{32}+3+i, a very high
- fee, and the following operations:
- - CHECK_ACCOUNT: ensure D has existed at least 24 hours
- - MERGE_ACCOUNT: D into E
- - MERGE_ACCOUNT: F into E
- - Whatever is needed to disburse funds after termination at step i
- - Note that if T_i is actually a series of k transactions, then
- the sequence should start at 2^{32}+3+ki and only the last
- transaction should merge the accounts.
-
-* R_{u,i} has a source account that belongs to u, a high fee (so u can
- increase the balance of that account as necessary to pay arbitrarily
- high fees), and the following operations:
- - CHECK_ACCOUNT: make sure D exists
- - BUMP_SEQUENCE E to 2^{32}+3+i (or 2^{32}+3+ki if k > 1)
-
-Note that this protocol satisfies the monotonicity property: Once
-account D exists, it cannot be deleted except by collaboration of all
-the users. Hence, the `CHECK_ACCOUNT` operations will never cause T_i
-or R_{u,i} to fail, only to be invalid.
+- TI deterministically creates and funds E and F.
+
+- TD has source account F, sequence number 2^{32}+1, and a very high fee (so
+ that in the event of rising fees, any user can add funds to F to make TD go
+ through). It has the following operations:
+ - Deterministically create account D
+ - Move enough XLM from E to F for another transaction
+
+- T_i has source account F and sequence number 2^{32}+3+i, a very high fee, and
+ the following operations:
+ - CHECK_ACCOUNT: ensure D has existed at least 24 hours
+ - MERGE_ACCOUNT: D into E
+ - MERGE_ACCOUNT: F into E
+ - Whatever is needed to disburse funds after termination at step i
+ - Note that if T_i is actually a series of k transactions, then the sequence
+ should start at 2^{32}+3+ki and only the last transaction should merge the
+ accounts.
+
+- R\_{u,i} has a source account that belongs to u, a high fee (so u can
+ increase the balance of that account as necessary to pay arbitrarily high
+ fees), and the following operations:
+ - CHECK_ACCOUNT: make sure D exists
+ - BUMP_SEQUENCE E to 2^{32}+3+i (or 2^{32}+3+ki if k > 1)
+
+Note that this protocol satisfies the monotonicity property: Once account D
+exists, it cannot be deleted except by collaboration of all the users. Hence,
+the `CHECK_ACCOUNT` operations will never cause T\_i or R\_{u,i} to fail, only to
+be invalid.
## Implementation
diff --git a/core/cap-0013.md b/core/cap-0013.md
index ac91429cd..a04358a9a 100644
--- a/core/cap-0013.md
+++ b/core/cap-0013.md
@@ -12,29 +12,70 @@ Protocol version: TBD
## Rejected
-Superceded by [CAP23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md)
+Superseded by
+[CAP23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md)
## Simple Summary
-This proposal would rename trustlines to "balances", and would add operations allowing any account to add a balance to any other account (as long as they also send the `base reserve` lumens to support that newly added balance), and to remove any of its own balances (including ones where the issuer has frozen the balance). It also adds an ALLOW_ADD_BALANCE flag to accounts, which, if unset, means that AddBalance operations that set that account as the `destination` will fail.
-
-This proposal would bring the behavior of Stellar assets and accounts somewhat closer to the behavior of ERC20 tokens on Ethereum, where, by default, any asset can be sent to any user without permission. Under these principles, any asset-specific differentiatxion from these rules should be enforced by the asset issuer, via the AUTHORIZATION_REQUIRED flag (or other flags we may introduce, such as co-signed assets or [immutable accounts](https://github.com/stellar/stellar-protocol/pull/210)). Any account-specific differentiation should be enforced by the ALLOW_ADD_BALANCE flag.
+This proposal would rename trustlines to "balances", and would add operations
+allowing any account to add a balance to any other account (as long as they
+also send the `base reserve` lumens to support that newly added balance), and
+to remove any of its own balances (including ones where the issuer has frozen
+the balance). It also adds an ALLOW_ADD_BALANCE flag to accounts, which, if
+unset, means that AddBalance operations that set that account as the
+`destination` will fail.
+
+This proposal would bring the behavior of Stellar assets and accounts somewhat
+closer to the behavior of ERC20 tokens on Ethereum, where, by default, any
+asset can be sent to any user without permission. Under these principles, any
+asset-specific differentiation from these rules should be enforced by the
+asset issuer, via the AUTHORIZATION_REQUIRED flag (or other flags we may
+introduce, such as co-signed assets or
+[immutable accounts](https://github.com/stellar/stellar-protocol/pull/210)).
+Any account-specific differentiation should be enforced by the
+ALLOW_ADD_BALANCE flag.
## Motivation
-This makes it easier for an issuer, exchange, or other service provider to send new assets to users without any interaction from that user. [SEP-0013](https://github.com/stellar/stellar-protocol/pull/205) is an alternative partial solution to this problem that does not involve protocol changes.
-
-The RemoveBalance operation also fixes some quirks in how the existing protocol handles merging accounts. Right now, it is impossible to presign or preauthorize transactions that will reliably merge an account into another account, because there is no way to reliably remove trustlines (since a ChangeTrust operation to remove a trustline can be foiled by anyone else sending you the asset) or merge accounts that still have trustlines. Additionally, in the current protocol, any issuer with which you have a trustline can make it impossible to merge your account, simply by freezing that asset. This would prevent you from recovering the `2*base reserve` XLM for the account as well as the `1*base reserve` XLM for that asset. (If we end up rejecting this proposal, we could create a new proposal that would solve these issues in a different way.)
+This makes it easier for an issuer, exchange, or other service provider to send
+new assets to users without any interaction from that user.
+[SEP-0013](https://github.com/stellar/stellar-protocol/pull/205) is an
+alternative partial solution to this problem that does not involve protocol
+changes.
+
+The RemoveBalance operation also fixes some quirks in how the existing protocol
+handles merging accounts. Right now, it is impossible to presign or
+preauthorize transactions that will reliably merge an account into another
+account, because there is no way to reliably remove trustlines (since a
+ChangeTrust operation to remove a trustline can be foiled by anyone else
+sending you the asset) or merge accounts that still have trustlines.
+Additionally, in the current protocol, any issuer with which you have a
+trustline can make it impossible to merge your account, simply by freezing that
+asset. This would prevent you from recovering the `2*base reserve` XLM for the
+account as well as the `1*base reserve` XLM for that asset. (If we end up
+rejecting this proposal, we could create a new proposal that would solve these
+issues in a different way.)
## Specification
### General changes
-Rename "trustline" to "balance" in the documentation, internal implementations, and newly created operations. Horizon's API already uses "Balances" to refer to trustlines. Any external APIs that currently use the term "Trustline" or "Lines" (particularly including the ChangeTrust and AllowTrust ops) should not be changed.
+Rename "trustline" to "balance" in the documentation, internal implementations,
+and newly created operations. Horizon's API already uses "Balances" to refer to
+trustlines. Any external APIs that currently use the term "Trustline" or
+"Lines" (particularly including the ChangeTrust and AllowTrust ops) should not
+be changed.
-The concept of limits on balances is deprecated, and we clearly communicate our intention to remove it in the future (in favor of all Balances having the "max" limit).
+The concept of limits on balances is deprecated, and we clearly communicate our
+intention to remove it in the future (in favor of all Balances having the "max"
+limit).
-The Change Trust operation is deprecated. Removing it from the protocol (even years after being deprecated) may not be worth the potential cost, but we should clearly discourage its use, and encourage users to use AddBalance even for adding balances to their own accounts. (To make this easier, SDKs could set the `destination` on AddBalance to be `sourceAccount` by default, if no destination is provided.)
+The Change Trust operation is deprecated. Removing it from the protocol (even
+years after being deprecated) may not be worth the potential cost, but we
+should clearly discourage its use, and encourage users to use AddBalance even
+for adding balances to their own accounts. (To make this easier, SDKs could set
+the `destination` on AddBalance to be `sourceAccount` by default, if no
+destination is provided.)
## New Operations
@@ -48,13 +89,19 @@ struct AddBalanceOp
};
```
-If `destination` does not have a balance for `asset`, the operation transfers base_reserve XLM from `sourceAccount` to `destination` (unless `sourceAccount` and `destination` are the same), and adds a balance for `asset` to `destination`.
+If `destination` does not have a balance for `asset`, the operation transfers
+base_reserve XLM from `sourceAccount` to `destination` (unless `sourceAccount`
+and `destination` are the same), and adds a balance for `asset` to
+`destination`.
-If `asset` has the AUTHORIZATION_REQUIRED flag, the trustline's authorized flag is initialized to false.
+If `asset` has the AUTHORIZATION_REQUIRED flag, the trustline's authorized flag
+is initialized to false.
-If `destination` already has a balance for `asset`, this operation does nothing.
+If `destination` already has a balance for `asset`, this operation does
+nothing.
-If `destination` has the ALLOW_ADD_BALANCE flag set to false, the operation fails.
+If `destination` has the ALLOW_ADD_BALANCE flag set to false, the operation
+fails.
#### RemoveBalance
@@ -65,36 +112,78 @@ struct AddBalanceOp
};
```
-Removes the `asset` balance from `sourceAccount`, sending any tokens stored in that balance to the issuer (i.e., burning them).
-If there is no balance for `asset` on `sourceAccount`, this operation does nothing.
-If there is a balance, this operation works regardless of whether the balance has the `authorized` flag set.
+Removes the `asset` balance from `sourceAccount`, sending any tokens stored in
+that balance to the issuer (i.e., burning them). If there is no balance for
+`asset` on `sourceAccount`, this operation does nothing. If there is a balance,
+this operation works regardless of whether the balance has the `authorized`
+flag set.
-This (unlike ChangeTrust) allows an account to successfully remove a balance even if the issuer has frozen it or if some third party has unexpectedly sent some tokens to it, thus fixing two of the warts in the current protocol.
+This (unlike ChangeTrust) allows an account to successfully remove a balance
+even if the issuer has frozen it or if some third party has unexpectedly sent
+some tokens to it, thus fixing two of the warts in the current protocol.
### New account flags
#### ALLOW_ADD_BALANCE
-This account flag, if set to false, prevents the AddBalance operation from adding balances to the account.
+This account flag, if set to false, prevents the AddBalance operation from
+adding balances to the account.
-This flag is set to true on accounts that are created after the protocol change takes effect. It is set to false on accounts that already existed before the protocol change takes effect.
+This flag is set to true on accounts that are created after the protocol change
+takes effect. It is set to false on accounts that already existed before the
+protocol change takes effect.
-This flag is necessary for some special cases because otherwise, when merging an account, any other user would be able to cause the merge to fail by using AddBalance to add some balance to your account. Wallets can use this flag to stop new balances from being added to an account, which gives them an opportunity to remove all existing balances with [RemoveBalance](#removebalance) before merging the account. This flag can also be set to false when an account is initially created, if that account will be used in any deterministic protocols that rely on AccountMerge working successfully.
+This flag is necessary for some special cases because otherwise, when merging
+an account, any other user would be able to cause the merge to fail by using
+AddBalance to add some balance to your account. Wallets can use this flag to
+stop new balances from being added to an account, which gives them an
+opportunity to remove all existing balances with
+[RemoveBalance](#removebalance) before merging the account. This flag can also
+be set to false when an account is initially created, if that account will be
+used in any deterministic protocols that rely on AccountMerge working
+successfully.
## Backwards Compatibility
-The ability to add balances to other users' accounts is a significant conceptual change in the protocol.
-
-Wallet software will have to be changed to correctly notice third-party addition of new balances to the account. Any lumen-only wallet (which previously was assured that the account it managed would not receive unexpected assets) will have to make a choice whether to support additional assets, or just ignore any incoming payments of other assets. Any wallets that fully support the AccountMerge operation will need to add support for locking the account and removing any stray balances before merging the account.
-
-Any protocol that relies on the deterministic behavior of AccountMerge will need to be changed. One such protocol is Starlight, which currently merges the channel accounts into a wallet account as the last step of the protocol, but we have already decided to remove AccountMerge from that protocol, in favor of setting the options on the channel accounts to give the recipient control over them. I am not aware of any other protocols that use AccountMerge (and it would be unwise for them to do so in most cases, since AccountMerge can already be caused to fail by the issuers of any assets that have trustlines on that account). Setting the ALLOW_ADD_BALANCE flag to false on all existing accounts should also reduce any compatibility issues caused by this change.
-
-I can't currently think of other ways that these changes could upset reasonable expectations, but the community might raise some.
+The ability to add balances to other users' accounts is a significant
+conceptual change in the protocol.
+
+Wallet software will have to be changed to correctly notice third-party
+addition of new balances to the account. Any lumen-only wallet (which
+previously was assured that the account it managed would not receive unexpected
+assets) will have to make a choice whether to support additional assets, or
+just ignore any incoming payments of other assets. Any wallets that fully
+support the AccountMerge operation will need to add support for locking the
+account and removing any stray balances before merging the account.
+
+Any protocol that relies on the deterministic behavior of AccountMerge will
+need to be changed. One such protocol is Starlight, which currently merges the
+channel accounts into a wallet account as the last step of the protocol, but we
+have already decided to remove AccountMerge from that protocol, in favor of
+setting the options on the channel accounts to give the recipient control over
+them. I am not aware of any other protocols that use AccountMerge (and it would
+be unwise for them to do so in most cases, since AccountMerge can already be
+caused to fail by the issuers of any assets that have trustlines on that
+account). Setting the ALLOW_ADD_BALANCE flag to false on all existing accounts
+should also reduce any compatibility issues caused by this change.
+
+I can't currently think of other ways that these changes could upset reasonable
+expectations, but the community might raise some.
## Alternatives
-This proposal previously skipped the ALLOW_ADD_BALANCE change, and would have changed the behavior of AccountMerge so that non-lumen balances were merged as well. However, it was pointed out that the change to AccountMerge would mean that a single AccountMerge operation could have up to ~N effects (where N is the number of assets on that account), which is undesirable behavior for denial-of-service reasons.
-
-[SEP-0013](https://github.com/stellar/stellar-protocol/pull/205), as mentioned above, is a potential solution to some of the same problems that does not involve protocol changes.
-
-If we decide not to make protocol changes to allow adding balances, we still might want to add the RemoveBalance operation (which works unconditionally) in order to allow accounts to be merged predictably, and to prevent an issuer with which you have a trustline from preventing you from ever merging your account.
+This proposal previously skipped the ALLOW_ADD_BALANCE change, and would have
+changed the behavior of AccountMerge so that non-lumen balances were merged as
+well. However, it was pointed out that the change to AccountMerge would mean
+that a single AccountMerge operation could have up to ~N effects (where N is
+the number of assets on that account), which is undesirable behavior for
+denial-of-service reasons.
+
+[SEP-0013](https://github.com/stellar/stellar-protocol/pull/205), as mentioned
+above, is a potential solution to some of the same problems that does not
+involve protocol changes.
+
+If we decide not to make protocol changes to allow adding balances, we still
+might want to add the RemoveBalance operation (which works unconditionally) in
+order to allow accounts to be merged predictably, and to prevent an issuer with
+which you have a trustline from preventing you from ever merging your account.
diff --git a/core/cap-0014.md b/core/cap-0014.md
index bb4a18f4e..acfb6046b 100644
--- a/core/cap-0014.md
+++ b/core/cap-0014.md
@@ -12,23 +12,21 @@ Protocol version: TBD
## Simple Summary
-As of protocol version 10, orders are sorted in protocol such that transactions
+As of protocol version 10, orders are sorted in protocol such that transactions
for an account are sorted by sequence number (ascending) the order between
accounts is randomized. We propose leaving the transactions un-sorted, but
checking that the order satisfies certain properties.
## Abstract
-
-As of protocol version 10, orders are sorted in protocol such that transactions
+As of protocol version 10, orders are sorted in protocol such that transactions
for an account are sorted by sequence number (ascending) the order between
accounts is randomized. We propose leaving the transactions un-sorted, but
checking that the order satisfies certain properties. The new properties ensure
-that transactions can apply in-order, but permit a malicious nominator to easily
-place a specific transaction to occur before another. This is not a change from
-current behavior, because a nominator may (with a small number of tries), adjust
-the TxSetFrame to place that transaction in front anyways.
-
+that transactions can apply in-order, but permit a malicious nominator to
+easily place a specific transaction to occur before another. This is not a
+change from current behavior, because a nominator may (with a small number of
+tries), adjust the TxSetFrame to place that transaction in front anyways.
## Motivation
@@ -36,31 +34,29 @@ The current requirement of a randomized sort is intended to prevent malicious
nominators from front-running a transaction, but it does not achieve that goal
because a nominator may front-run a transaction in small number of trials.
-Randomized ordering does, however, neccessitate that the result of a transaction
-is tri-valent (succeed, fail, invalid).
+Randomized ordering does, however, necessitate that the result of a
+transaction is tri-valent (succeed, fail, invalid).
If instead, we force the order to be determined by the nominator according to
some rules, this opens the door to them only including transactions which
succeed in a TxSetFrame.
-It also, as a side-benefit, reduces the asymptotic latency of validating a ledger
-because the ordering must only be _verified_ by each node.
-
-
-
+It also, as a side-benefit, reduces the asymptotic latency of validating a
+ledger because the ordering must only be _verified_ by each node.
## Specification
In this version we iterate over the TxSet transactions and check that they are
in a valid order with respect to the sequences of each account such that each
-transaction can be applied. This implies nominators must propose the exact ordering
-validly.
+transaction can be applied. This implies nominators must propose the exact
+ordering validly.
We also eagerly consider BumpSequence operations during this check to ensure
that transactions are also properly linearized with respect to BumpSequence.
-This implies that certain types of transaction graphs with two BumpSequences may
-be impossible to linearize, however we specify that the ordering of transactions
-may change in future ledger versions so this cannot be depended on.
+This implies that certain types of transaction graphs with two BumpSequences
+may be impossible to linearize, however we specify that the ordering of
+transactions may change in future ledger versions so this cannot be depended
+on.
This ensures that the mTransactions are sorted in an order such that the
transactions can all be applied and consume a Sequence number.
@@ -70,8 +66,6 @@ decide the ordering.
For previous ledger versions we must maintain the old behavior.
-
-
## Rationale
Making this adjustment makes it more clear that relying on the randomization of
@@ -81,8 +75,6 @@ order that the nominator selects is arbitary and attacker-controlled.
Checking consistency given BumpSequences fixes an issue which can cause fees to
be paid unexpectedly with a contract written to use bump sequence.
-
-
## Backwards Compatibility
The old sorting algorithm for `sortForApply` and `sortForHash` must remain for
@@ -96,7 +88,8 @@ order though, any valid order suffices.
## Forwards Compatibility
-In new versions, we may want to enforce stricter rules on the order of transactions.
+In new versions, we may want to enforce stricter rules on the order of
+transactions.
The arbitary ordering at the nominator layer makes it easy for futures changes
to further constrict the behavior, e.g., enforcing that the ordering must be
@@ -106,16 +99,15 @@ grouped by account, etc.
Such changes are then something that downstream software must already be _able_
to handle witout surprise.
-It's possible that CAP-0014 does not specify a forwards compatible set of rules,
-in which case the rule should be incompatibly updated. Because we specify that
-the linearization properties shall *not* be relied on for future ledger
-versions, this does not break expectations but may require rewriting software
-designs.
-
-If there is a need for other orders for compact lookup proofs for light clients,
-the commitments should be generated and verified separately from the base
-ordering.
+It's possible that CAP-0014 does not specify a forwards compatible set of
+rules, in which case the rule should be incompatibly updated. Because we
+specify that the linearization properties shall _not_ be relied on for future
+ledger versions, this does not break expectations but may require rewriting
+software designs.
+If there is a need for other orders for compact lookup proofs for light
+clients, the commitments should be generated and verified separately from the
+base ordering.
## Implementation
diff --git a/core/cap-0015.md b/core/cap-0015.md
index 4840bdf3d..9f52b336b 100644
--- a/core/cap-0015.md
+++ b/core/cap-0015.md
@@ -12,10 +12,12 @@ Protocol version: 13
```
## Simple Summary
-This proposal introduces fee-bump transactions, which allow an arbitrary account
-to pay the fee for a transaction.
+
+This proposal introduces fee-bump transactions, which allow an arbitrary
+account to pay the fee for a transaction.
## Motivation
+
If a transaction has insufficient `fee`, for example due to either surge
pricing or an increase in the `baseFee`, that transaction will not be included
in a transaction set. If the transaction is only signed by the party that wants
@@ -24,38 +26,42 @@ fee, sign it, and submit it. But there are many circumstances where this is not
possible, such as when pre-signed and pre-authorized transactions are involved.
In these cases, it is possible that a high-value transaction (such as the
release of escrowed funds or the settlement of a payment channel) cannot
-execute as of protocol version 12. Fee-bump transactions will resolve this issue
-by enabling anyone to pay the fee for an already existing transaction.
-
-The mechanism described here will also facilitate another usage: when applications
-(such as games) are willing to pay user fees. As of protocol version 12, this
-could be resolved in two ways. In the first approach, funds could be sent
-directly to the users' account but there is no guarantee that the user will
-spend those funds on fees and there is no way to recover unspent funds. In the
-second approach, one or more accounts controlled by the application could be
-used as the source account for user transactions but this leads to sequence
-number management issues.
+execute as of protocol version 12. Fee-bump transactions will resolve this
+issue by enabling anyone to pay the fee for an already existing transaction.
+
+The mechanism described here will also facilitate another usage: when
+applications (such as games) are willing to pay user fees. As of protocol
+version 12, this could be resolved in two ways. In the first approach, funds
+could be sent directly to the users' account but there is no guarantee that the
+user will spend those funds on fees and there is no way to recover unspent
+funds. In the second approach, one or more accounts controlled by the
+application could be used as the source account for user transactions but this
+leads to sequence number management issues.
### Goals Alignment
+
This proposal is aligned with several Stellar Network Goals, among them:
- The Stellar Network should facilitate simplicity and interoperability with
-other protocols and networks.
-- The Stellar Network should make it easy for developers of Stellar projects
-to create highly usable products.
+ other protocols and networks.
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products.
## Abstract
+
`TransactionEnvelope` is transformed from an XDR `struct` to an XDR `union`
while preserving binary compatibility. `FeeBumpTransaction` is introduced with
corresponding envelope type `ENVELOPE_TYPE_TX_FEE_BUMP` as a new type of
-transaction. Fee-bump transactions as described in this proposal will enable any
-account to pay the fee for an existing transaction without the need to re-sign
-the existing transaction or manage sequence numbers.
+transaction. Fee-bump transactions as described in this proposal will enable
+any account to pay the fee for an existing transaction without the need to
+re-sign the existing transaction or manage sequence numbers.
## Specification
### XDR
+
The new transaction, transaction envelope, and related XDR types are:
+
```c++
enum EnvelopeType
{
@@ -145,6 +151,7 @@ struct TransactionSignaturePayload
```
The new transaction result XDR types are:
+
```c++
enum TransactionResultCode
{
@@ -225,8 +232,9 @@ struct TransactionResult
### Semantics
### How does the TransactionEnvelope transformation work?
-In order to create multiple types of transaction envelopes, we needed to perform
-a clever transformation of the XDR. We split the discriminant off
+
+In order to create multiple types of transaction envelopes, we needed to
+perform a clever transformation of the XDR. We split the discriminant off
`Transaction.sourceAccount` leaving a raw ed25519 public key (this type is
`TransactionV0`), then used the discriminant as the discriminant for the
`TransactionEnvelope` union. We then added a new type of transaction envelope,
@@ -235,6 +243,7 @@ them in the future. The third type of transaction is the fee-bump transaction,
which can wrap a new-style transaction envelope of type ENVELOPE_TYPE_TX.
#### Fee Rate
+
A fee-bump transaction has an effective number of operations equal to one plus
the number of operations in the inner transaction. Correspondingly, the minimum
fee for the fee-bump transaction is one base fee more than the minimum fee for
@@ -243,9 +252,10 @@ one plus the number of operations in the inner transaction rather than the
number of operations in the inner transaction alone.
#### Validity
-Prior to the protocol version in which this proposal is implemented,
-only a `TransactionEnvelope` of type `ENVELOPE_TYPE_TX_V0` can be valid whereas
-a `TransactionEnvelope` of any other type will be invalid with result
+
+Prior to the protocol version in which this proposal is implemented, only a
+`TransactionEnvelope` of type `ENVELOPE_TYPE_TX_V0` can be valid whereas a
+`TransactionEnvelope` of any other type will be invalid with result
`txNOT_SUPPORTED`. Starting in the protocol version in which this proposal is
implemented, only a `TransactionEnvelope` of type `ENVELOPE_TYPE_TX` or
`ENVELOPE_TYPE_TX_FEE_BUMP` can be valid whereas a `TransactionEnvelope` of any
@@ -254,9 +264,9 @@ other type will be invalid with result `txNOT_SUPPORTED`. Because
`TransactionEnvelope` of type `ENVELOPE_TYPE_TX_V0` can simply be converted to
`ENVELOPE_TYPE_TX`.
-Validity requirements for a `TransactionEnvelope` of type `ENVELOPE_TYPE_TX` are
-identical to the existing validity requirements for a `TransactionEnvelope` of
-type `ENVELOPE_TYPE_TX_V0`.
+Validity requirements for a `TransactionEnvelope` of type `ENVELOPE_TYPE_TX`
+are identical to the existing validity requirements for a `TransactionEnvelope`
+of type `ENVELOPE_TYPE_TX_V0`.
To validate a `TransactionEnvelope E` of type `ENVELOPE_TYPE_TX_FEE_BUMP` with
inner transaction envelope `F = E.feeBump().tx.innerTx`, check that
@@ -271,64 +281,73 @@ inner transaction envelope `F = E.feeBump().tx.innerTx`, check that
(pre-authorized transaction hashes are implicitly included here), otherwise
return `txBAD_AUTH`
- `E.feeBump().tx.feeSource` has sufficient available native balance (see
- CAP-0003) to pay `E.feeBump().tx.fee` in addition to fees that will be paid by
- this account for other transactions, otherwise return `txINSUFFICIENT_BALANCE`
+ CAP-0003) to pay `E.feeBump().tx.fee` in addition to fees that will be paid
+ by this account for other transactions, otherwise return
+ `txINSUFFICIENT_BALANCE`
- `F` is valid, except that
- - `F.v1().tx.fee` may be less than the minimum fee for `F`, but must be
- non-negative
- - `F.v1().tx.sourceAccount` may not have sufficient available native balance
+ - `F.v1().tx.fee` may be less than the minimum fee for `F`, but must be
+ non-negative
+ - `F.v1().tx.sourceAccount` may not have sufficient available native balance
otherwise return `txINNER_FAILED` with the inner result set to the result of
validating `F`
#### Replace-by-Fee
+
If a node receives a valid `TransactionEnvelope E'` of type
-`ENVELOPE_TYPE_TX_FEE_BUMP` with source account `A` and sequence number `N` when
-it already has a `TransactionEnvelope E` with source account `A` and sequence
-number `N` in the transaction queue, then it should replace `E` with `E'` if and
-only if the fee rate for `E'` is at least 10 times the fee rate for `E`. It
-should be emphasized that source account in this section refers specifically to
-the source account of the _sequence number_ of the transaction, although these
-transactions may still have different fee source accounts.
+`ENVELOPE_TYPE_TX_FEE_BUMP` with source account `A` and sequence number `N`
+when it already has a `TransactionEnvelope E` with source account `A` and
+sequence number `N` in the transaction queue, then it should replace `E` with
+`E'` if and only if the fee rate for `E'` is at least 10 times the fee rate for
+`E`. It should be emphasized that source account in this section refers
+specifically to the source account of the _sequence number_ of the transaction,
+although these transactions may still have different fee source accounts.
#### Surge Pricing
+
The logic for surge pricing is unchanged. The only new consideration is that a
`TransactionEnvelope E` of type `ENVELOPE_TYPE_TX_FEE_BUMP` should be included
in the queue for `E.feeBump().tx.innerTx.v1().sourceAccount` rather than the
queue for `E.feeBump().tx.feeSource`.
#### Application and Results
+
Even if the outer transaction of a fee-bump is invalid during application, we
will still apply the inner transaction. Specifically, the behavior is:
- Remove used one-time signers for the outer transaction
- Apply the inner transaction (as if there were no outer transaction)
-If the inner transaction succeeds, then the result is `txFEE_BUMP_INNER_SUCCESS`
-and the `innerResultPair` is what would have been produced by the inner
-transaction in the absence of the outer transaction. Analogously, if the inner
-transaction fails, then the result is `txFEE_BUMP_INNER_FAILED` and the
-`innerResultPair` is what would have been produced by the inner transaction in
-the absence of the outer transaction. No other results are possible.
+If the inner transaction succeeds, then the result is
+`txFEE_BUMP_INNER_SUCCESS` and the `innerResultPair` is what would have been
+produced by the inner transaction in the absence of the outer transaction.
+Analogously, if the inner transaction fails, then the result is
+`txFEE_BUMP_INNER_FAILED` and the `innerResultPair` is what would have been
+produced by the inner transaction in the absence of the outer transaction. No
+other results are possible.
## Rationale
### Make ENVELOPE_TYPE_TX_V0 Invalid
+
This proposal uses the same signatures for `TransactionEnvelope` of type
`ENVELOPE_TYPE_TX_V0` and `ENVELOPE_TYPE_TX`. This makes it possible to convert
a `TransactionEnvelope` of type `ENVELOPE_TYPE_TX_V0` into a
-`TransactionEnvelope` of type `ENVELOPE_TYPE_TX` without needing new signatures.
-The main advantage of this is it allows us to make `ENVELOPE_TYPE_TX_V0` invalid
-and only support `ENVELOPE_TYPE_TX` in `FeeBumpTransaction`. Furthermore, making
-`ENVELOPE_TYPE_TX_V0` invalid simplifies the possibility of having a "canonical"
-or "normalized" XDR representation of a `TransactionEnvelope` (this concept has
-arisen during the discussion of some CAPs in the past).
+`TransactionEnvelope` of type `ENVELOPE_TYPE_TX` without needing new
+signatures. The main advantage of this is it allows us to make
+`ENVELOPE_TYPE_TX_V0` invalid and only support `ENVELOPE_TYPE_TX` in
+`FeeBumpTransaction`. Furthermore, making `ENVELOPE_TYPE_TX_V0` invalid
+simplifies the possibility of having a "canonical" or "normalized" XDR
+representation of a `TransactionEnvelope` (this concept has arisen during the
+discussion of some CAPs in the past).
### Replay Prevention
+
`FeeBumpTransaction` does not contain an explicit sequence number because it
relies on the sequence number of the inner transaction for replay prevention.
### Fees
+
A fee-bump transaction has an effective number of operations strictly greater
than the number of operations in the inner transaction because it requires more
work for the network to process a fee-bump transaction than the inner
@@ -337,24 +356,25 @@ transaction alone.
The semantics specify that a fee-bump transaction is invalid if the fee rate of
the outer transaction does not exceed the fee rate of the inner transaction.
This restriction is designed to prevent an obvious misunderstanding of the
-semantics of `FeeBumpTransaction`. Suppose someone has a signed transaction with
-fee `F`, but they actually do not want to pay a fee greater than `F'`. So they
-use a `FeeBumpTransaction` with the outer fee set to `F'`. But if the outer
-fee rate is less than the inner fee rate, then in any case where the fee-bump
-could be included in the transaction set the inner transaction could also have
-been included. This completely defeats the purpose of submitting the
+semantics of `FeeBumpTransaction`. Suppose someone has a signed transaction
+with fee `F`, but they actually do not want to pay a fee greater than `F'`. So
+they use a `FeeBumpTransaction` with the outer fee set to `F'`. But if the
+outer fee rate is less than the inner fee rate, then in any case where the
+fee-bump could be included in the transaction set the inner transaction could
+also have been included. This completely defeats the purpose of submitting the
`FeeBumpTransaction` with fee `F'` because a malicious adversary could always
cause you to pay fee `F`. Therefore we prohibit this possibility entirely.
### Application and Results
+
The only purpose of a fee-bump transaction is to bid a higher fee for the inner
transaction to either make the transaction valid in the event that the fee rate
is less than the base fee, or to accelerate inclusion of the transaction in the
-event of surge pricing. In either case, the fee-bump has fulfilled its role once
-it has been included in the transaction set. The source account for each
+event of surge pricing. In either case, the fee-bump has fulfilled its role
+once it has been included in the transaction set. The source account for each
transaction is charged the fee before any transactions are actually applied, so
-all that remains when applying a fee-bump transaction is to remove used one-time
-signers and to apply the inner transaction.
+all that remains when applying a fee-bump transaction is to remove used
+one-time signers and to apply the inner transaction.
There are several reasons that the inner transaction must be applied regardless
of the validity of the outer transaction at apply time. The most important
@@ -362,13 +382,14 @@ reason is replay protection: as noted above, the sequence number of the inner
transaction provides replay protection for the fee-bump transaction. If the
inner transaction were not applied when the outer transaction became invalid at
apply time, then the sequence number would not be consumed in that case. This
-would allow the fee-bump transaction to be played again if the outer transaction
-were to become valid again.
+would allow the fee-bump transaction to be played again if the outer
+transaction were to become valid again.
Given the above, there is no reason to return the result of the fee-bump
transaction separately from the result of the inner transaction.
## Backwards Incompatibilities
+
All downstream systems which submit transactions to stellar-core will need to
transform `TransactionEnvelope` of type `ENVELOPE_TYPE_TX_V0` to type
`ENVELOPE_TYPE_TX` before submission. As a temporary bridge, stellar-core will
@@ -382,10 +403,14 @@ continue to function, but downstream systems that process historical
transactions will need to be updated with the new XDR.
## Security Concerns
+
None yet.
## Test Cases
+
None yet.
## Implementation
-This proposal was implemented in https://github.com/stellar/stellar-core/pull/2419.
+
+This proposal was implemented in
+https://github.com/stellar/stellar-core/pull/2419.
diff --git a/core/cap-0016.md b/core/cap-0016.md
index 99048c6a9..b61c369e6 100644
--- a/core/cap-0016.md
+++ b/core/cap-0016.md
@@ -12,27 +12,25 @@ Protocol version: TBD
# Motivation
-A number of Stellar asset issuers need greater control over assets
-than simply authorizing particular accounts to hold the asset. As a
-result, we are seeing people build solutions in which the asset issuer
-is actually a co-signer on asset holders' accounts. This solution has
-several drawbacks. In particular, the asset owner cannot unilaterally
-withdraw XLM or other assets from his or her account, and it is very
-hard to trade one such restricted asset for another (as both issuers
-would need to be cosigners on the account, giving one issuer
+A number of Stellar asset issuers need greater control over assets than simply
+authorizing particular accounts to hold the asset. As a result, we are seeing
+people build solutions in which the asset issuer is actually a co-signer on
+asset holders' accounts. This solution has several drawbacks. In particular,
+the asset owner cannot unilaterally withdraw XLM or other assets from his or
+her account, and it is very hard to trade one such restricted asset for another
+(as both issuers would need to be cosigners on the account, giving one issuer
permission to authorize transactions on the other's asset).
-This proposal is a first cut at attempting to address the problem
-through minimal stellar-core changes.
+This proposal is a first cut at attempting to address the problem through
+minimal stellar-core changes.
# Co-signer authorized trust lines
-To accommodate asset issuers with restrictions, we propose adding
-another mode for trustlines of `AUTH_REQUIRED` assets, called
-"cosigner authorized," that lies somewhere between being not
-authorized and fully authorized. The idea is to allow issuers to
-require asset holders to request co-signing of transactions involving
-the asset without being cosigners on asset holders' accounts.
+To accommodate asset issuers with restrictions, we propose adding another mode
+for trustlines of `AUTH_REQUIRED` assets, called "cosigner authorized," that
+lies somewhere between being not authorized and fully authorized. The idea is
+to allow issuers to require asset holders to request co-signing of transactions
+involving the asset without being cosigners on asset holders' accounts.
```c
enum TrustLineFlags
@@ -44,70 +42,63 @@ enum TrustLineFlags
};
```
-There are now three modes for the trustline of an `AUTH_REQUIRED`
-asset, based on `TrustLineEntry::flags`:
+There are now three modes for the trustline of an `AUTH_REQUIRED` asset, based
+on `TrustLineEntry::flags`:
-* If `(flags & 3) == 0`, the source account is not authorized. Any
- `PAYMENT`, `PATH_PAYMENT`, `MANAGE_OFFER`, or
- `CREATE_PASSIVE_OFFER`, with the source account causes the whole
- transaction to fail, with only two exceptions:
+- If `(flags & 3) == 0`, the source account is not authorized. Any `PAYMENT`,
+ `PATH_PAYMENT`, `MANAGE_OFFER`, or `CREATE_PASSIVE_OFFER`, with the source
+ account causes the whole transaction to fail, with only two exceptions:
+ - A `PATH_PAYMENT` is allowed to contain the asset in `path`, so long as it
+    does not appear in `sendAsset` or `destAsset`, and
- * A `PATH_PAYMENT` is allowed to contain the asset in `path`, so
- long as it does not apear in `sendAsset` or `destAsset`, and
+ - A `MANAGE_OFFER` is allowed if it sets `amount` to 0 (to delete the offer).
- * A `MANAGE_OFFER` is allowed if it sets `amount` to 0 (to delete
- the offer).
+ In addition, any operation with a different (even authorized) source account
+ will fail if it attempts to send the asset to an account with
+ `(flags & 3) == 0`. Finally, existing orders on the order book belonging to
+ the account with `(flags & 3) == 0` are immediately invalid and cannot be
+ filled. This behavior should be close or identical to the status quo.
- In addition, any operation with a different (even authorized) source
- account will fail if it attempts to send the asset to an account
- with `(flags & 3) == 0`. Finally, existing orders on the order book
- belonging to the account with `(flags & 3) == 0` are immediately
- invalid and cannot be filled. This behavior should be close or
- identical to the status quo.
+- If `(flags & 3) == COAUTHORIZED_FLAG` for an account, then the account has
+ certain additional permissions compared to when those bits are 0.
+ Specifically:
+ - The account's offers in the order book continue to be valid, and can be
+ filled at any time.
-* If `(flags & 3) == COAUTHORIZED_FLAG` for an account, then the
- account has certain additional permissions compared to when those
- bits are 0. Specifically:
+ - The account can use `MANAGE_OFFER` to reduce the `amount` of an offer
+    involving the asset (including canceling the offer with an `amount` of 0),
+ but not to increase the offer.
- * The account's offers in the order book continue to be valid, and
- can be filled at any time.
+ - The account can receive (but not send) payments and path payments in the
+ asset, up to the `limit` of the trustline.
- * The account can use `MANAGE_OFFER` to reduce the `amount` of an
- offer involving the asset (including canceling the offer with
- and `amount` of 0), but not to increase the offer.
+ However, any other operation on the asset requires that the transaction be
+ co-signed by the asset issuer at low threshold. Moreover, if the account
+ increases the `limit` on the trustline and the transaction is not cosigned by
+ the issuer, then the `COAUTHORIZED_FLAG` is cleared so `(flags & 3)` goes
+ back to 0.
- * The account can receive (but not send) payments and path
- payments in the asset, up to the `limit` of the trustline.
-
- However, any other operation on the asset requires that the
- transaction be co-signed by the asset issuer at low threshold.
- Moreover, if the account increases the `limit` on the trustline and
- the transaction is not cosigned by the issuer, then the
- `COAUTHORIZED_FLAG` is cleared so `(flags & 3)` goes back to 0.
-
-* If `(flags & 3) == AUTHORIZED_FLAG`, then the account can perform
- arbitrary transactions on the asset, including sending payments to
- accounts with `COAUTHORIZED_FLAG`.
+- If `(flags & 3) == AUTHORIZED_FLAG`, then the account can perform arbitrary
+ transactions on the asset, including sending payments to accounts with
+ `COAUTHORIZED_FLAG`.
# Operation changes
-Implementing cosigned assets requires changes to two operations.
-Because an asset issuer's signature may be out of place on a
-transaction requiring asset issuer coauthorization, we add a `NopOp`
-operation that requires a signature from an arbitrary source account.
-Such an operation will be useful in other contexts, too, as currently
-pre-authorized transactions sometimes require ugly hacks like having
-an account send one Stroup to itself. Second, we have to modify
-`ALLOW_TRUST` to enable issuers to set the new `COAUTHORIZED_FLAG`.
+Implementing cosigned assets requires changes to two operations. Because an
+asset issuer's signature may be out of place on a transaction requiring asset
+issuer coauthorization, we add a `NopOp` operation that requires a signature
+from an arbitrary source account. Such an operation will be useful in other
+contexts, too, as currently pre-authorized transactions sometimes require ugly
+hacks like having an account send one stroop to itself. Second, we have to
+modify `ALLOW_TRUST` to enable issuers to set the new `COAUTHORIZED_FLAG`.
## `NO_OPERATION` (`NopOp`)
-The `NO_OPERATION` operation does nothing, but requires a valid
-signature from the operation's source account. Since the default
-source account is the source of the transaction, this will always
-succeed at low threshold if the operation's `sourceAccount` is
-`NULL`. However, it can be used to require a low threshold signature
-from an asset issuer.
+The `NO_OPERATION` operation does nothing, but requires a valid signature from
+the operation's source account. Since the default source account is the source
+of the transaction, this will always succeed at low threshold if the
+operation's `sourceAccount` is `NULL`. However, it can be used to require a low
+threshold signature from an asset issuer.
```c
enum NopOp {
@@ -153,18 +144,17 @@ default:
};
```
-`NopOp` is not strictly necessary, as a redundant `ALLOW_TRUST` can be
-used instead, but having `NopOp` is cleaner and has other applications
-to pre-authorized transactions.
+`NopOp` is not strictly necessary, as a redundant `ALLOW_TRUST` can be used
+instead, but having `NopOp` is cleaner and has other applications to
+pre-authorized transactions.
## `ALLOW_TRUST` flags
-Only one small change is required to `AllowTrustOp`. We change the
-`authorize` field from a `bool` to a `uint32`. This provides binary
-compatibility with clients that do not know about the
-`COAUTHORIZED_FLAG` (since `AUTHORIZED_FLAG == TRUE` in XDR), but
-having a `uint32` provides the additional option of setting it to
-`COAUTHORIZED_FLAG`.
+Only one small change is required to `AllowTrustOp`. We change the `authorize`
+field from a `bool` to a `uint32`. This provides binary compatibility with
+clients that do not know about the `COAUTHORIZED_FLAG` (since
+`AUTHORIZED_FLAG == TRUE` in XDR), but having a `uint32` provides the
+additional option of setting it to `COAUTHORIZED_FLAG`.
```c
struct AllowTrustOp
diff --git a/core/cap-0017.md b/core/cap-0017.md
index 1d044bcfb..b602616dd 100644
--- a/core/cap-0017.md
+++ b/core/cap-0017.md
@@ -11,49 +11,64 @@ Protocol version: TBD
```
## Simple Summary
-This proposal makes specifying and implementing the protocol easier by simplifying the semantics of
-`LedgerEntry.lastModifiedLedgerSeq`.
+
+This proposal makes specifying and implementing the protocol easier by
+simplifying the semantics of `LedgerEntry.lastModifiedLedgerSeq`.
## Abstract
-It is currently possible for `LedgerEntry.lastModifiedLedgerSeq` to be updated by an operation even
-if that operation did not make any modifications to the underlying data of that `LedgerEntry`. This
-document proposes changing the semantics of `lastModifiedLedgerSeq` to be updated by an operation
-if and only if the underlying data of that `LedgerEntry` has changed.
+
+It is currently possible for `LedgerEntry.lastModifiedLedgerSeq` to be updated
+by an operation even if that operation did not make any modifications to the
+underlying data of that `LedgerEntry`. This document proposes changing the
+semantics of `lastModifiedLedgerSeq` to be updated by an operation if and only
+if the underlying data of that `LedgerEntry` has changed.
## Motivation
-The main benefit of this proposal is that implementing the protocol would be simpler. The current
-behavior forces implementers to carefully manage whether `lastModifiedLedgerSeq` should or should
-not be updated in any given circumstance. The only reliable way for an implementer to determine
-when `lastModifiedLedgerSeq` should be updated in the current protocol version is by referring to
-an existing implementation. This proposal would unambiguously specify when `lastModifiedLedgerSeq`
-should and should not be updated.
-
-For example, using `AllowTrust` to authorize a trustline that is already authorized will update the
-`lastModifiedLedgerSeq` of the trustline without making any other modifications. In contrast, a
-tentative winner of `Inflation` that ends up winning nothing (due to, for example, having excessive
-native asset liabilities) is unchanged and does not receive an updated `lastModifiedLedgerSeq`.
+
+The main benefit of this proposal is that implementing the protocol would be
+simpler. The current behavior forces implementers to carefully manage whether
+`lastModifiedLedgerSeq` should or should not be updated in any given
+circumstance. The only reliable way for an implementer to determine when
+`lastModifiedLedgerSeq` should be updated in the current protocol version is by
+referring to an existing implementation. This proposal would unambiguously
+specify when `lastModifiedLedgerSeq` should and should not be updated.
+
+For example, using `AllowTrust` to authorize a trustline that is already
+authorized will update the `lastModifiedLedgerSeq` of the trustline without
+making any other modifications. In contrast, a tentative winner of `Inflation`
+that ends up winning nothing (due to, for example, having excessive native
+asset liabilities) is unchanged and does not receive an updated
+`lastModifiedLedgerSeq`.
## Specification
-The value of `lastModifiedLedgerSeq` should be updated by an operation if and only if the net
-effect of that operation includes updating the underlying data of a `LedgerEntry`. Specifically, if
-`leBefore` and `leAfter` are the `LedgerEntry` (excluding `lastModifiedLedgerSeq`) before and after
-the operation has been applied, respectively, then `lastModifiedLedgerSeq` should be updated to
-`LedgerHeader.ledgerSeq` if and only if `leBefore != leAfter`.
+
+The value of `lastModifiedLedgerSeq` should be updated by an operation if and
+only if the net effect of that operation includes updating the underlying data
+of a `LedgerEntry`. Specifically, if `leBefore` and `leAfter` are the
+`LedgerEntry` (excluding `lastModifiedLedgerSeq`) before and after the
+operation has been applied, respectively, then `lastModifiedLedgerSeq` should
+be updated to `LedgerHeader.ledgerSeq` if and only if `leBefore != leAfter`.
## Rationale
-This proposal is to be applied at the operation level as it guarantees that if the net effect of
-any operation in a transaction set includes updating the underlying data of a `LedgerEntry` then
-the value of `lastModifiedLedgerSeq` will be updated. This behavior is desirable from an
-implementation perspective because it allows each operation to correctly determine whether
-`lastModifiedLedgerSeq` should be updated without any knowledge of whether the `LedgerEntry` has
-been or will be modified by other operations in the same transaction set.
+
+This proposal is to be applied at the operation level as it guarantees that if
+the net effect of any operation in a transaction set includes updating the
+underlying data of a `LedgerEntry` then the value of `lastModifiedLedgerSeq`
+will be updated. This behavior is desirable from an implementation perspective
+because it allows each operation to correctly determine whether
+`lastModifiedLedgerSeq` should be updated without any knowledge of whether the
+`LedgerEntry` has been or will be modified by other operations in the same
+transaction set.
## Backwards Compatibility
-Downstream systems which rely on `lastModifiedLedgerSeq` could be effected as the semantics of that
-field will change.
+
+Downstream systems which rely on `lastModifiedLedgerSeq` could be affected as
+the semantics of that field will change.
## Test Cases
+
None yet.
## Implementation
+
No implementation yet.
diff --git a/core/cap-0018.md b/core/cap-0018.md
index 13e4e3394..6f8e2bbfc 100644
--- a/core/cap-0018.md
+++ b/core/cap-0018.md
@@ -11,18 +11,41 @@ Protocol version: 13
```
## Simple Summary
-This proposal provides issuers with a level of authorization between unauthorized and fully authorized. This level of authorization will allow a trustline to maintain liabilities (see CAP-0003) without permitting any other operations.
+
+This proposal provides issuers with a level of authorization between
+unauthorized and fully authorized. This level of authorization will allow a
+trustline to maintain liabilities (see CAP-0003) without permitting any other
+operations.
## Abstract
-This proposal adds a new flag to `TrustLineFlags` which offers a level of authorization intermediate between unauthorized and fully authorized. It is then shown how this new flag enables similar behavior to CAP-0016 by carefully setting and toggling the level of authorization. Additional flags could also be added, which would enable fine-grained control of authorization on a per-account basis.
-## Motivation
-A number of Stellar asset issuers need greater control over assets than simply authorizing particular accounts to hold the asset. The `TrustLineEntry.flags` field allows an issuer to toggle between the following two states:
+This proposal adds a new flag to `TrustLineFlags` which offers a level of
+authorization intermediate between unauthorized and fully authorized. It is
+then shown how this new flag enables similar behavior to CAP-0016 by carefully
+setting and toggling the level of authorization. Additional flags could also be
+added, which would enable fine-grained control of authorization on a
+per-account basis.
-- `TrustLineEntry.flags == 0`: The account can hold a balance but cannot receive payments, send payments, maintain offers, or manage offers
-- `TrustLineEntry.flags == AUTHORIZED_FLAG`: The account can hold a balance, receive payments, send payments, maintain offers, and manage offers
+## Motivation
-If the issuer does not intend to allow the account to maintain offers, then toggling between the two states described above can be used to control what accounts do with their assets. To achieve this, an issuer could leave accounts in the unauthorized state. In order to do anything with the assets an account holds, that account would need to become authorized. This would require the signature of the issuer. The owner of such an account could then request that the issuer sign a transaction; the issuer should refuse to sign any transaction that does not return the account to the unauthorized state. A transaction that an issuer might be willing to sign could look like
+A number of Stellar asset issuers need greater control over assets than simply
+authorizing particular accounts to hold the asset. The `TrustLineEntry.flags`
+field allows an issuer to toggle between the following two states:
+
+- `TrustLineEntry.flags == 0`: The account can hold a balance but cannot
+ receive payments, send payments, maintain offers, or manage offers
+- `TrustLineEntry.flags == AUTHORIZED_FLAG`: The account can hold a balance,
+ receive payments, send payments, maintain offers, and manage offers
+
+If the issuer does not intend to allow the account to maintain offers, then
+toggling between the two states described above can be used to control what
+accounts do with their assets. To achieve this, an issuer could leave accounts
+in the unauthorized state. In order to do anything with the assets an account
+holds, that account would need to become authorized. This would require the
+signature of the issuer. The owner of such an account could then request that
+the issuer sign a transaction; the issuer should refuse to sign any transaction
+that does not return the account to the unauthorized state. A transaction that
+an issuer might be willing to sign could look like
- Operation 1: Issuer uses `AllowTrust` to fully authorize account A, asset X
- Operation 2: Issuer uses `AllowTrust` to fully authorize account B, asset X
@@ -32,14 +55,25 @@ If the issuer does not intend to allow the account to maintain offers, then togg
which would require the signatures of the issuer and account A.
-But this approach does not work if the account should be allowed to maintain offers. As soon as the account becomes unauthorized, all of its offers will be removed from the ledger. So some issuers have taken a more drastic approach, where the issuer is actually a signer on the asset holders' accounts. This solution has several drawbacks:
+But this approach does not work if the account should be allowed to maintain
+offers. As soon as the account becomes unauthorized, all of its offers will be
+removed from the ledger. So some issuers have taken a more drastic approach,
+where the issuer is actually a signer on the asset holders' accounts. This
+solution has several drawbacks:
-1. The owner of an account cannot unilaterally utilize assets unrelated to the issuer without the signature of that issuer
-2. If multiple issuers want to become signers on a single account, then many signatures are potentially required for simple operations
-3. Issuers cannot easily rotate keys, because their keys are on every account holding their asset
+1. The owner of an account cannot unilaterally utilize assets unrelated to the
+ issuer without the signature of that issuer
+2. If multiple issuers want to become signers on a single account, then many
+ signatures are potentially required for simple operations
+3. Issuers cannot easily rotate keys, because their keys are on every account
+ holding their asset
## Specification
-This proposal requires the addition of `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` to `TrustLineFlags`, so the XDR becomes
+
+This proposal requires the addition of
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` to `TrustLineFlags`, so the XDR
+becomes
+
```c++
enum TrustLineFlags
{
@@ -51,7 +85,13 @@ enum TrustLineFlags
const MASK_TRUSTLINE_FLAGS = 3;
```
-In order to interact with these flags, the type of the `authorize` field of `AllowTrustOp` must be changed from `bool` to `uint32`. This provides binary compatibility with clients that do not know about the new flags (since `AUTHORIZED_FLAG == TRUE` in XDR), but having a `uint32` provides the additional option of setting it to any combination of `TrustLineFlags`. The XDR becomes
+In order to interact with these flags, the type of the `authorize` field of
+`AllowTrustOp` must be changed from `bool` to `uint32`. This provides binary
+compatibility with clients that do not know about the new flags (since
+`AUTHORIZED_FLAG == TRUE` in XDR), but having a `uint32` provides the
+additional option of setting it to any combination of `TrustLineFlags`. The XDR
+becomes
+
```c++
struct AllowTrustOp
{
@@ -74,60 +114,121 @@ struct AllowTrustOp
};
```
-The new behavior of `ALLOW_TRUST` is to set `flags = authorize` on the relevant trustline. Semantically, this is equivalent to the existing behavior of `ALLOW_TRUST` because
-
-- `authorize == FALSE` becomes `authorize == 0` which removes any authorization from the trustline
-- `authorize == TRUE` becomes `authorize == AUTHORIZED_FLAG` which makes the trustline fully authorized
-
-
-The combination `AUTHORIZED_FLAG | AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` is not valid because `AUTHORIZED_FLAG` implies `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG`. Trying to set `authorize` to any value above `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` is also invalid. `ALLOW_TRUST_MALFORMED` is the error code that will be returned.
-
-If `AUTH_REVOCABLE_FLAG` is not set, the transition from `AUTHORIZED_FLAG` to `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` is invalid. `ALLOW_TRUST_CANT_REVOKE` is the error code that will be returned.
-
-
-If a trustline `tl` has `tl.flags == AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` then
-
-1. `tl.balance > 0` is permitted (this is always true for any value of `tl.flags`)
-2. `tl.liabilities.buying > 0` is permitted (this is not true if `tl.flags == 0`)
-3. `tl.liabilities.selling > 0` is permitted (this is not true if `tl.flags == 0`)
-4. `tl.balance` can only change if an existing liability is converted into a balance. (this is not true if `tl.flags == AUTHORIZED`)
-5. No operation which creates or modifies an offer buying or selling `tl.asset` is permitted but deleting such offers is permitted. (this is not true if `tl.flags == AUTHORIZED`)
-6. Upgrades should treat `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` the same way as `AUTHORIZED_FLAG` because liabilities can only be decreased on upgrades (since protocol version 11). An invariant for this should be added.
+The new behavior of `ALLOW_TRUST` is to set `flags = authorize` on the relevant
+trustline. Semantically, this is equivalent to the existing behavior of
+`ALLOW_TRUST` because
+
+- `authorize == FALSE` becomes `authorize == 0` which removes any authorization
+ from the trustline
+- `authorize == TRUE` becomes `authorize == AUTHORIZED_FLAG` which makes the
+ trustline fully authorized
+
+The combination `AUTHORIZED_FLAG | AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` is
+not valid because `AUTHORIZED_FLAG` implies
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG`. Trying to set `authorize` to any
+value above `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` is also invalid.
+`ALLOW_TRUST_MALFORMED` is the error code that will be returned.
+
+If `AUTH_REVOCABLE_FLAG` is not set, the transition from `AUTHORIZED_FLAG` to
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` is invalid. `ALLOW_TRUST_CANT_REVOKE`
+is the error code that will be returned.
+
+If a trustline `tl` has `tl.flags == AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG`
+then
+
+1. `tl.balance > 0` is permitted (this is always true for any value of
+ `tl.flags`)
+2. `tl.liabilities.buying > 0` is permitted (this is not true if
+ `tl.flags == 0`)
+3. `tl.liabilities.selling > 0` is permitted (this is not true if
+ `tl.flags == 0`)
+4. `tl.balance` can only change if an existing liability is converted into a
+ balance. (this is not true if `tl.flags == AUTHORIZED`)
+5. No operation which creates or modifies an offer buying or selling `tl.asset`
+ is permitted but deleting such offers is permitted. (this is not true if
+ `tl.flags == AUTHORIZED`)
+6. Upgrades should treat `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` the same way
+ as `AUTHORIZED_FLAG` because liabilities can only be decreased on upgrades
+ (since protocol version 11). An invariant for this should be added.
## Rationale
### How can this new flag be used?
-Using `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` enables issuers to control what accounts do with their assets even if the accounts should be allowed to maintain offers. To achieve this, an issuer could leave accounts in the `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state. Such an account is allowed to maintain liabilities, meaning it can own offers but cannot otherwise do anything with the asset. In order to do anything with the assets an account holds, that account would need to become authorized. This would require the signature of the issuer. The owner of such an account could then request that the issuer sign a transaction; the issuer should refuse to sign any transaction that does not return the account to the `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state. A transaction that an issuer might be willing to sign could look like
+
+Using `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` enables issuers to control what
+accounts do with their assets even if the accounts should be allowed to
+maintain offers. To achieve this, an issuer could leave accounts in the
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state. Such an account is allowed to
+maintain liabilities, meaning it can own offers but cannot otherwise do
+anything with the asset. In order to do anything with the assets an account
+holds, that account would need to become authorized. This would require the
+signature of the issuer. The owner of such an account could then request that
+the issuer sign a transaction; the issuer should refuse to sign any transaction
+that does not return the account to the
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state. A transaction that an issuer
+might be willing to sign could look like
- Operation 1: Issuer uses `AllowTrust` to fully authorize account A, asset X
- Operation 2: Issuer uses `AllowTrust` to fully authorize account B, asset X
- Operation 3: Payment from A to B
-- Operation 4: Issuer uses `AllowTrust` to set account B, asset X to `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state
-- Operation 5: Issuer uses `AllowTrust` to set account A, asset X to `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state
+- Operation 4: Issuer uses `AllowTrust` to set account B, asset X to
+ `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state
+- Operation 5: Issuer uses `AllowTrust` to set account A, asset X to
+ `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state
or
- Operation 1: Issuer uses `AllowTrust` to fully authorize account A, asset X
- Operation 2: Account A manages offer to buy or sell X
-- Operation 3: Issuer uses `AllowTrust` to set account A, asset X to `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state
+- Operation 3: Issuer uses `AllowTrust` to set account A, asset X to
+ `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state
both of which would require the signatures of the issuer and account A.
-In comparison to CAP-0016 this proposal would prohibit a fully authorized account from sending to an account in the `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state without the signature of the issuer, which could be a desirable feature because it means issuers entirely control their asset. But this solution is flexible in that it would be possible to add other flags to `TrustLineFlags` which would control other aspects of authorization. For example, one could add the flag `AUTHORIZED_TO_INCREASE_BALANCE_FLAG` which would permit an account to receive payments. The behavior of CAP-0016 could be emulated entirely with this additional flag. An appropriate set of such flags would provide issuers with fine-grained control of authorization on a per-account basis, which justifies the name of this proposal.
+In comparison to CAP-0016 this proposal would prohibit a fully authorized
+account from sending to an account in the
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state without the signature of the
+issuer, which could be a desirable feature because it means issuers entirely
+control their asset. But this solution is flexible in that it would be possible
+to add other flags to `TrustLineFlags` which would control other aspects of
+authorization. For example, one could add the flag
+`AUTHORIZED_TO_INCREASE_BALANCE_FLAG` which would permit an account to receive
+payments. The behavior of CAP-0016 could be emulated entirely with this
+additional flag. An appropriate set of such flags would provide issuers with
+fine-grained control of authorization on a per-account basis, which justifies
+the name of this proposal.
### Why requirement (5)?
+
Note that requirement (5) implies but is not equivalent to
6. No operation which increases `tl.liabilities.buying` is permitted
7. No operation which increases `tl.liabilities.selling` is permitted
-Although requirements (6) and (7) seem more natural than requirement (5), since they refer only to individual values in the ledger like the other requirements, they are actually insufficient. If requirements (6) and (7) were included instead of (5) then it would be possible for account A to modify an offer selling asset X for asset Y and change it into an offer selling asset X for asset Z without a signature from the issuers of X or Y even if A is only `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` for one or both of those assets. This would allow accounts to easily evade restrictions from the issuer on which assets can be exchanged for X. Requirements (6) and (7) would also make it possible for accounts to change the price of offers without a signature from the relevant issuer or issuers.
+Although requirements (6) and (7) seem more natural than requirement (5), since
+they refer only to individual values in the ledger like the other requirements,
+they are actually insufficient. If requirements (6) and (7) were included
+instead of (5) then it would be possible for account A to modify an offer
+selling asset X for asset Y and change it into an offer selling asset X for
+asset Z without a signature from the issuers of X or Y even if A is only
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` for one or both of those assets. This
+would allow accounts to easily evade restrictions from the issuer on which
+assets can be exchanged for X. Requirements (6) and (7) would also make it
+possible for accounts to change the price of offers without a signature from
+the relevant issuer or issuers.
## Backwards Compatibility
-The only backwards incompatibility with this proposal is if downstream systems relied on the fact that `!(TrustLineEntry.flags & AUTHORIZED_FLAG)` implies unauthorized. In this case, downstream systems might erroneously conclude that an account is unauthorized when it is in fact in the `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state.
+
+The only backwards incompatibility with this proposal is if downstream systems
+relied on the fact that `!(TrustLineEntry.flags & AUTHORIZED_FLAG)` implies
+unauthorized. In this case, downstream systems might erroneously conclude that
+an account is unauthorized when it is in fact in the
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state.
## Test Cases
+
None yet.
## Implementation
+
https://github.com/stellar/stellar-core/pull/2387
diff --git a/core/cap-0019.md b/core/cap-0019.md
index 983c07a26..d7d71f1c6 100644
--- a/core/cap-0019.md
+++ b/core/cap-0019.md
@@ -12,43 +12,41 @@ Protocol version: 13
## Simple Summary
-Allow future extensibility of transaction types by making
-`TransactionEnvelope` contain a union.
+Allow future extensibility of transaction types by making `TransactionEnvelope`
+contain a union.
## Abstract
-`TransactionEnvelope` now contains a union for the transaction type.
-For binary compatibility, legacy transactions are now of type
-`Transaction0` and can have an Ed25519 account ID only.
+`TransactionEnvelope` now contains a union for the transaction type. For binary
+compatibility, legacy transactions are now of type `Transaction0` and can have
+an Ed25519 account ID only.
## Motivation
-Right now, it will be very difficult to upgrade the `Transaction` type
-in a backwards compatible way. However, until we add a new account
-type, we can take advantage of the fact that all `Transaction`
-structures, when marshaled, start with a 0-valued 32-bit integer (part
-of the `AccountID` which can only be a `PublicKey` or type
-`PUBLIC_KEY_TYPE_ED25519`). We make `TransactionEnvelope` contain a
-union, and when the discriminant is 0, we have it contain a new type
+Right now, it will be very difficult to upgrade the `Transaction` type in a
+backwards compatible way. However, until we add a new account type, we can take
+advantage of the fact that all `Transaction` structures, when marshaled, start
+with a 0-valued 32-bit integer (part of the `AccountID` which can only be a
+`PublicKey` or type `PUBLIC_KEY_TYPE_ED25519`). We make `TransactionEnvelope`
+contain a union, and when the discriminant is 0, we have it contain a new type
`Transaction0` which is basically a `Transaction` with the
`PUBLIC_KEY_TYPE_ED25519` stripped off the beginning and the key type
hard-coded to Ed25519.
## Specification
-We need only two changes. `EnvelopeType` conveniently didn't have a
-0, so we now allocate that for legacy transaction envelopes. We then
-add a union inside `TransactionEnvelope`. Note that
-`TransactionSignaturePayload` already had a union, so we don't need to
-change it. In particularly, we *don't* add a case for
-`ENVELOPE_TYPE_TX0` because we don't want there to be multiple ways to
+We need only two changes. `EnvelopeType` conveniently didn't have a 0, so we
+now allocate that for legacy transaction envelopes. We then add a union inside
+`TransactionEnvelope`. Note that `TransactionSignaturePayload` already had a
+union, so we don't need to change it. In particular, we _don't_ add a case
+for `ENVELOPE_TYPE_TX0` because we don't want there to be multiple ways to
compute a transaction ID.
-We expect implementations to provide a helper function for converting
-a `Transaction0` into a `Transaction`, as doing so is necessary for
-computing the transaction ID.
+We expect implementations to provide a helper function for converting a
+`Transaction0` into a `Transaction`, as doing so is necessary for computing the
+transaction ID.
-~~~~ {.c}
+```{.c}
enum EnvelopeType
{
ENVELOPE_TYPE_TX0 = 0, // new
@@ -88,31 +86,28 @@ case ENVELOPE_TYPE_TX:
DecoratedSignature signatures<20>;
} v1;
};
-~~~~
+```
## Rationale
-There have been a number of proposals to change the transaction
-format, or add different types of transaction. We have also discussed
-new `AccountID` types. Though we haven't decided on which proposals
-to adopt, changing the transaction format will become much harder if
-we do so after adopting a new `AccountID` type, so we might as well
-make this union change now.
+There have been a number of proposals to change the transaction format, or add
+different types of transaction. We have also discussed new `AccountID` types.
+Though we haven't decided on which proposals to adopt, changing the transaction
+format will become much harder if we do so after adopting a new `AccountID`
+type, so we might as well make this union change now.
## Backwards Compatibility
-The changes are backwards compatible with legacy binary transactions.
-The new code will be able to read old transactions, but old code
-cannot read the new `TransactionEnvelope` type. Hence, a phased
-deployment makes sense.
+The changes are backwards compatible with legacy binary transactions. The new
+code will be able to read old transactions, but old code cannot read the new
+`TransactionEnvelope` type. Hence, a phased deployment makes sense.
## Example
-Imagine we want to add a new `feeSource` field to transactions. We
-can simply introduce a new type `Transaction4` that contains this
-extra field, then:
+Imagine we want to add a new `feeSource` field to transactions. We can simply
+introduce a new type `Transaction4` that contains this extra field, then:
-~~~~ {.c}
+```{.c}
enum EnvelopeType
{
ENVELOPE_TYPE_TX0 = 0,
@@ -160,7 +155,7 @@ case ENVELOPE_TYPE_TX4:
DecoratedSignature signatures<20>;
} v4;
};
-~~~~
+```
## Implementation
diff --git a/core/cap-0020.md b/core/cap-0020.md
index 2a0a625fb..bfdb2db35 100644
--- a/core/cap-0020.md
+++ b/core/cap-0020.md
@@ -12,78 +12,90 @@ Protocol version: 11
## Simple Summary
-This CAP extends the `BucketEntryType` enum with two new values called `INITENTRY` and `METAENTRY`,
-and adds a new `metaEntry` field in the `BucketEntry` union.
-
-The semantics of the `INITENTRY` value are identical to the semantics of the `LIVEENTRY` value (and
-reuse the `liveEntry` field) except that `INITENTRY` also denotes the entry that is the
-(chronologically) first copy of the entry in any live range of the entry in the bucket list. In
-other words: a bucket entry marked `INITENTRY` implies that either no entry with the same ledger key
-exists in an older bucket, or else that the (chronologically) preceding entry with the same ledger
-key was `DEADENTRY`.
-
-The purpose of the `METAENTRY` type is to record the ledger protocol version in buckets and, in
-particular, to facilitate switching merge algorithm to accommodate buckets with `INITENTRY`
-semantics. Therefore the `METAENTRY` entry type (or something like it) is a prerequisite for the
-`INITENTRY` type.
+This CAP extends the `BucketEntryType` enum with two new values called
+`INITENTRY` and `METAENTRY`, and adds a new `metaEntry` field in the
+`BucketEntry` union.
+
+The semantics of the `INITENTRY` value are identical to the semantics of the
+`LIVEENTRY` value (and reuse the `liveEntry` field) except that `INITENTRY`
+also denotes the entry that is the (chronologically) first copy of the entry in
+any live range of the entry in the bucket list. In other words: a bucket entry
+marked `INITENTRY` implies that either no entry with the same ledger key exists
+in an older bucket, or else that the (chronologically) preceding entry with the
+same ledger key was `DEADENTRY`.
+
+The purpose of the `METAENTRY` type is to record the ledger protocol version in
+buckets and, in particular, to facilitate switching merge algorithm to
+accommodate buckets with `INITENTRY` semantics. Therefore the `METAENTRY` entry
+type (or something like it) is a prerequisite for the `INITENTRY` type.
## Abstract
-The ledger state in stellar-core is maintained in a special-purpose log-structured merge (LSM) tree
-called the bucket list. The design of this data structure is part of the overall protocol and its
-contents are defined in the protocol XDR files. This CAP proposes a very slight change to one of the
-constituent datatypes in the LSM tree to address a performance problem observed in the field.
-
-The current bucket list consists of a set of entries that can be in one of two states: **live** or
-**dead**. Dead entries (so-called "tombstones") exist strictly to override the live-ness of a live
-entry in an older level of the bucket list. If a tombstone is present in the bucket list for which
-no live entry exists in an older level, that tombstone is **redundant**: there is nothing for it to
-override.
-
-The original design of the bucket list assumed that most entries in the ledger would be relatively
-long-lived (such as accounts) and therefore the presence of redundant tombstones would not be a
-major performance issue. In practice, we observe that the great majority of ledger entries have
-turned out to be short-lived (primarily offers) and therefore buckets have accumulated many
+The ledger state in stellar-core is maintained in a special-purpose
+log-structured merge (LSM) tree called the bucket list. The design of this data
+structure is part of the overall protocol and its contents are defined in the
+protocol XDR files. This CAP proposes a very slight change to one of the
+constituent datatypes in the LSM tree to address a performance problem observed
+in the field.
+
+The current bucket list consists of a set of entries that can be in one of two
+states: **live** or **dead**. Dead entries (so-called "tombstones") exist
+strictly to override the live-ness of a live entry in an older level of the
+bucket list. If a tombstone is present in the bucket list for which no live
+entry exists in an older level, that tombstone is **redundant**: there is
+nothing for it to override.
+
+The original design of the bucket list assumed that most entries in the ledger
+would be relatively long-lived (such as accounts) and therefore the presence of
+redundant tombstones would not be a major performance issue. In practice, we
+observe that the great majority of ledger entries have turned out to be
+short-lived (primarily offers) and therefore buckets have accumulated many
redundant tombstones.
-The change in this CAP will eliminate the conditions that create redundant tombstones. They will
-then gradually be merged-out of the set of live buckets, as the ledger evolves and buckets are
-merged. Within a few months, the accumulated redundant tombstones will be eliminated from live
-buckets. Historical buckets containing redundant tombstones will remain, but new buckets will be
-**much** smaller.
+The change in this CAP will eliminate the conditions that create redundant
+tombstones. They will then gradually be merged-out of the set of live buckets,
+as the ledger evolves and buckets are merged. Within a few months, the
+accumulated redundant tombstones will be eliminated from live buckets.
+Historical buckets containing redundant tombstones will remain, but new buckets
+will be **much** smaller.
## Motivation
-At present, buckets in the existing network are overwhelmingly composed of redundant tombstones for
-short-lived offers. As a typical example, a recent second-from-oldest-level bucket contains 5.9m
-redundant offer tombstones and 850k live entries, of which only 4.7k are for live offers. These
-tombstones incur a significant performance cost in a variety of day-to-day operations of the
-network, including point-in-time catchup and regular bucket merge operations during transaction
-processing.
-
-The buildup of redundant tombstones occurs because the existing merge algorithm conservatively
-preserves tombstones until the final level of the bucket list. This conservative behavior is
-required because the existing representation of live entries in buckets does not indicate whether a
-live entry is the _oldest_ existing entry with a given key; the merge algorithm must therefore assume
-the possibility that any tombstone may be shadowing some other live entry in an older bucket, and
-preserve all tombstones.
-
-If the merge algorithm could determine that a tombstone was being merged with the oldest entry with
-a given key -- if it were being merged with a live entry that was marked as the _initial_ entry with
-its key -- then the merge algorithm could discard both the "initial" live entry and the tombstone,
-accumulating zero space in the merged bucket. This is exactly what this CAP proposes to do.
+At present, buckets in the existing network are overwhelmingly composed of
+redundant tombstones for short-lived offers. As a typical example, a recent
+second-from-oldest-level bucket contains 5.9m redundant offer tombstones and
+850k live entries, of which only 4.7k are for live offers. These tombstones
+incur a significant performance cost in a variety of day-to-day operations of
+the network, including point-in-time catchup and regular bucket merge
+operations during transaction processing.
+
+The buildup of redundant tombstones occurs because the existing merge algorithm
+conservatively preserves tombstones until the final level of the bucket list.
+This conservative behavior is required because the existing representation of
+live entries in buckets does not indicate whether a live entry is the _oldest_
+existing entry with a given key; the merge algorithm must therefore assume the
+possibility that any tombstone may be shadowing some other live entry in an
+older bucket, and preserve all tombstones.
+
+If the merge algorithm could determine that a tombstone was being merged with
+the oldest entry with a given key -- if it were being merged with a live entry
+that was marked as the _initial_ entry with its key -- then the merge algorithm
+could discard both the "initial" live entry and the tombstone, accumulating
+zero space in the merged bucket. This is exactly what this CAP proposes to do.
## Specification
-The ledger protocol number will be increased, to version TBD. This is a breaking change. Merging
-buckets in a ledger of the newer protocol version (and beyond) will need to use a revised merge
-algorithm (see below).
+The ledger protocol number will be increased, to version TBD. This is a
+breaking change. Merging buckets in a ledger of the newer protocol version (and
+beyond) will need to use a revised merge algorithm (see below).
### XDR changes
-The `BucketEntryType` enum will be extended with two new values: `INITENTRY` and `METAENTRY`.
+The `BucketEntryType` enum will be extended with two new values: `INITENTRY`
+and `METAENTRY`.
-The `BucketEntry` union will be extended with a corresponding new state `metaEntry` of type `MetaEntry`.
+The `BucketEntry` union will be extended with a corresponding new state
+`metaEntry` of type `MetaEntry`.
The updated relevant XDR definitions follow:
@@ -127,93 +139,102 @@ case METAENTRY:
### Bucket content changes
-Under protocol TBD, new buckets will have a single METAENTRY written as their first entry, which
-carries metadata that indicates the conditions under which the bucket was formed: currently only its
-protocol version. Absence of a METAENTRY implies that the bucket originates from before protocol
-version TBD.
+Under protocol TBD, new buckets will have a single METAENTRY written as their
+first entry, which carries metadata that indicates the conditions under which
+the bucket was formed: currently only its protocol version. Absence of a
+METAENTRY implies that the bucket originates from before protocol version TBD.
-Under protocol TBD, the semantics of adding a live entry to a fresh bucket at the head of the bucket
-list will be changed to reflect the lifecycle of the live entry: if the entry is being added because
-it is **new** (as the result of a creation event), it must be added in `INITENTRY` state rather than
-the `LIVEENTRY` state. If the entry is being added because of an **update** to the same key, the
-entry must be added in `LIVEENTRY` state.
+Under protocol TBD, the semantics of adding a live entry to a fresh bucket at
+the head of the bucket list will be changed to reflect the lifecycle of the
+live entry: if the entry is being added because it is **new** (as the result of
+a creation event), it must be added in `INITENTRY` state rather than the
+`LIVEENTRY` state. If the entry is being added because of an **update** to the
+same key, the entry must be added in `LIVEENTRY` state.
### Protocol version changes
-Any merge will be performed under the maximum protocol version of all of the input buckets and
-shadow buckets involved in the merge. If the protocol version selected by inspecting buckets
-involved in a merge exceeds the protocol version of the ledger at which the merge is being
-started, the merge will fail with an error.
+Any merge will be performed under the maximum protocol version of all of the
+input buckets and shadow buckets involved in the merge. If the protocol version
+selected by inspecting buckets involved in a merge exceeds the protocol version
+of the ledger at which the merge is being started, the merge will fail with an
+error.
### Merge algorithm changes
-Under protocol TBD, the semantics of merging buckets will be changed to the following:
-
- - If two entries have different keys, emit the lower-ordered one as we do today.
- - If the two entries have the same key and neither is `INITENTRY`, take the newer entry as we do
- today.
- - If the two entries have the same key and at least one is `INITENTRY`:
- - If the newer entry is of type `DEADENTRY`, output nothing. Consider both entries annihilated.
- - If the newer entry is of type `LIVEENTRY`, output an entry in `INITENTRY` state with the value
- of the newer `LIVEENTRY` entry.
- - If the newer entry is of type `INITENTRY`:
- - If the older entry is of type `DEADENTRY`, output an entry in `LIVEENTRY` state with the value
- of the newer `INITENTRY` entry.
- - Otherwise signal an error: an `INITENTRY` should never be the next lifecycle state for an
- entry in `INITENTRY` or `LIVEENTRY` state.
-
-
-The following table summarizes the rules for merging entries in `INITENTRY` state:
-
-
- | old | new | result |
- |---------------|--------------|---------------|
- | INITENTRY | DEADENTRY | empty |
- | INITENTRY=x | LIVEENTRY=y | INITENTRY=y |
- | DEADENTRY | INITENTRY=x | LIVEENTRY=x |
- | INITENTRY | INITENTRY | error |
- | LIVEENTRY | INITENTRY | error |
+Under protocol TBD, the semantics of merging buckets will be changed to the
+following:
+
+- If two entries have different keys, emit the lower-ordered one as we do
+ today.
+- If the two entries have the same key and neither is `INITENTRY`, take the
+ newer entry as we do today.
+- If the two entries have the same key and at least one is `INITENTRY`:
+ - If the newer entry is of type `DEADENTRY`, output nothing. Consider both
+ entries annihilated.
+ - If the newer entry is of type `LIVEENTRY`, output an entry in `INITENTRY`
+ state with the value of the newer `LIVEENTRY` entry.
+ - If the newer entry is of type `INITENTRY`:
+ - If the older entry is of type `DEADENTRY`, output an entry in `LIVEENTRY`
+ state with the value of the newer `INITENTRY` entry.
+ - Otherwise signal an error: an `INITENTRY` should never be the next
+ lifecycle state for an entry in `INITENTRY` or `LIVEENTRY` state.
+
+The following table summarizes the rules for merging entries in `INITENTRY`
+state:
+
+| old | new | result |
+| ----------- | ----------- | ----------- |
+| INITENTRY | DEADENTRY | empty |
+| INITENTRY=x | LIVEENTRY=y | INITENTRY=y |
+| DEADENTRY | INITENTRY=x | LIVEENTRY=x |
+| INITENTRY | INITENTRY | error |
+| LIVEENTRY | INITENTRY | error |
### Shadowed entry elision changes
-Under protocol TBD, the semantics of emitting shadowed entries during merges will be changed to
-preserve both `INITENTRY` and `DEADENTRY` entries. Only `LIVEENTRY` entries can be elided, in order
-to retain the lifecycle structure of each ledger key over time.
+Under protocol TBD, the semantics of emitting shadowed entries during merges
+will be changed to preserve both `INITENTRY` and `DEADENTRY` entries. Only
+`LIVEENTRY` entries can be elided, in order to retain the lifecycle structure
+of each ledger key over time.
## Rationale
-The existing bucket list does compact-out entries that have been shadowed at _newer_ levels; it was
-simply an oversight in the initial design to not compact-out tombstones that are redundant with
-respect to _older_ levels.
+The existing bucket list does compact-out entries that have been shadowed at
+_newer_ levels; it was simply an oversight in the initial design to not
+compact-out tombstones that are redundant with respect to _older_ levels.
-Such redundant tombstone compaction is a standard feature of LSM trees, for example see the logic in
-LevelDB:
+Such redundant tombstone compaction is a standard feature of LSM trees, for
+example see the logic in LevelDB:
https://github.com/google/leveldb/blob/master/db/db_impl.cc#L965-L974
-We could follow a similar approach to this code, using (for example) ledger sequence numbers and
-deducing that a given tombstone is not relevant based on the ledger number of the bucket it is being
-merged into; but this would be a more invasive (and more fragile) change. The proposed change in
-this CAP is the simplest possible that we could think of.
+We could follow a similar approach to this code, using (for example) ledger
+sequence numbers and deducing that a given tombstone is not relevant based on
+the ledger number of the bucket it is being merged into; but this would be a
+more invasive (and more fragile) change. The proposed change in this CAP is the
+simplest possible that we could think of.
## Backwards Compatibility
-This change will produce ledgers that contain buckets that have a `BucketEntryType` value
-(`INITENTRY`) that is unknown to older versions of stellar-core. If those versions try to process
-such a bucket, they will fail with an error, but this should not occur since they will fail earlier
-with a protocol-version mismatch error.
-
-Since each ledger indicates the protocol version in its header, new versions of stellar-core will be
-able to process both old and new buckets appropriately, enabling the new logic only when forming a
-bucket for a new-version ledger. It should not need to employ version-sensitive logic when _reading_
-buckets, since the old and new behavior are identical on old input buckets (those without
+This change will produce ledgers that contain buckets that have a
+`BucketEntryType` value (`INITENTRY`) that is unknown to older versions of
+stellar-core. If those versions try to process such a bucket, they will fail
+with an error, but this should not occur since they will fail earlier with a
+protocol-version mismatch error.
+
+Since each ledger indicates the protocol version in its header, new versions of
+stellar-core will be able to process both old and new buckets appropriately,
+enabling the new logic only when forming a bucket for a new-version ledger. It
+should not need to employ version-sensitive logic when _reading_ buckets, since
+the old and new behavior are identical on old input buckets (those without
`INITENTRY` entries).
-Semantically, very little will otherwise change: the high level _meaning_ of the bucket list will
-remain unchanged, as will be the contents of the active SQL database against-which transactions
-execute. Only the representation of entries in the bucket list will change: newly-created entries
-will be differentiated from updated entries, and redundant tombstones will thereby be compressed-out
-of buckets.
+Semantically, very little will otherwise change: the high level _meaning_ of
+the bucket list will remain unchanged, as will be the contents of the active
+SQL database against which transactions execute. Only the representation of
+entries in the bucket list will change: newly-created entries will be
+differentiated from updated entries, and redundant tombstones will thereby be
+compressed-out of buckets.
## Test Cases
@@ -221,4 +242,5 @@ Extensive testcases will accompany the implementation.
## Implementation
-Prototype implementation is present in https://github.com/stellar/stellar-core/pull/1950
+Prototype implementation is present in
+https://github.com/stellar/stellar-core/pull/1950
diff --git a/core/cap-0021.md b/core/cap-0021.md
index c5c6686a0..f2d040c9e 100644
--- a/core/cap-0021.md
+++ b/core/cap-0021.md
@@ -15,64 +15,61 @@ Protocol version: 19
## Simple Summary
-This proposal generalizes the `timeBounds` field in `Transaction` to
-support other conditions, including conditions that relax sequence
-number checking and provide relative timelocks.
+This proposal generalizes the `timeBounds` field in `Transaction` to support
+other conditions, including conditions that relax sequence number checking and
+provide relative timelocks.
## Motivation
-Sequence numbers are tricky for anything other than simple payments.
-For instance, pre-authorized transactions can only execute when the
-source account has a specific sequence number. Worse yet, sequence
-numbers make it difficult for protocols such as payment channels to
-guarantee that one participant can execute a transaction signed by all
-participants. In general, an N-party protocol requires N auxiliary
-accounts, one for each participant; each logical transaction
-pre-signed by all N participants must actually be implemented as N
-pre-signed transactions using each auxiliary account at a source, so
-that one participant can still submit a pre-signed transaction even if
-another participant has changed the sequence number on a different
-auxiliary account. This is further complicated by the need to
-maintain a reserve balance on each auxiliary account.
+Sequence numbers are tricky for anything other than simple payments. For
+instance, pre-authorized transactions can only execute when the source account
+has a specific sequence number. Worse yet, sequence numbers make it difficult
+for protocols such as payment channels to guarantee that one participant can
+execute a transaction signed by all participants. In general, an N-party
+protocol requires N auxiliary accounts, one for each participant; each logical
+transaction pre-signed by all N participants must actually be implemented as N
+pre-signed transactions using each auxiliary account at a source, so that one
+participant can still submit a pre-signed transaction even if another
+participant has changed the sequence number on a different auxiliary account.
+This is further complicated by the need to maintain a reserve balance on each
+auxiliary account.
### Goals Alignment
-This proposal advances network scalability by facilitating off-chain
-payment channels. It advances security and simplicity and
-interoperability with other networks by enabling relative timelocks.
-Finally, the proposal makes it easier for developers to create highly
-usable products by enabling time-delayed key recovery.
+This proposal advances network scalability by facilitating off-chain payment
+channels. It advances security and simplicity and interoperability with other
+networks by enabling relative timelocks. Finally, the proposal makes it easier
+for developers to create highly usable products by enabling time-delayed key
+recovery.
## Abstract
-This proposal extends `AccountEntry` to keep track of the time and
-ledger number at which the account's sequence number was last changed.
-It also replaces the `timeBounds` field of `Transaction` with a union
-that allows more general transaction preconditions. One of these
-preconditions requires that the sequence number of `sourceAccount`
-have been modified at least some period of time in the past,
-effectively providing a relative timelock. Another precondition
-optionally weakens sequence number checking so as to allow a
+This proposal extends `AccountEntry` to keep track of the time and ledger
+number at which the account's sequence number was last changed. It also
+replaces the `timeBounds` field of `Transaction` with a union that allows more
+general transaction preconditions. One of these preconditions requires that the
+sequence number of `sourceAccount` have been modified at least some period of
+time in the past, effectively providing a relative timelock. Another
+precondition optionally weakens sequence number checking so as to allow a
transaction to execute when the `sourceAccount` is within some range.
## Specification
`AccountEntryExtensionV2`'s `ext` field is extended to keep track of
-`seqLedger` and `seqTime`--the ledger number and time at which the
-sequence number was set to its present value. These values are updated
-in two situations:
+`seqLedger` and `seqTime`--the ledger number and time at which the sequence
+number was set to its present value. These values are updated in two
+situations:
1. Transaction: For the `sourceAccount` of every executed transaction.
-2. `BumpSequenceOp` operation: For the `sourceAccount` of every
-successfully executed `BumpSequenceOp` operation, regardless of whether
-the `BumpSequenceOp` actually increased or modified the sequence number
-of the account. This allows an account to update it's `seqLedger`
-or `seqTime` without using an additional sequence number.
+2. `BumpSequenceOp` operation: For the `sourceAccount` of every successfully
+ executed `BumpSequenceOp` operation, regardless of whether the
+ `BumpSequenceOp` actually increased or modified the sequence number of the
+  account. This allows an account to update its `seqLedger` or `seqTime`
+ without using an additional sequence number.
-If an account does not have an `AccountEntryExtensionV3` because it
-hasn't been upgraded yet, then it behaves as if `seqLedger` and
-`seqTime` are both 0.
+If an account does not have an `AccountEntryExtensionV3` because it hasn't been
+upgraded yet, then it behaves as if `seqLedger` and `seqTime` are both 0.
```c++
// An ExtensionPoint is always marshaled as a 32-bit 0 value. At a
@@ -113,28 +110,25 @@ struct AccountEntryExtensionV2
};
```
-Preconditions are represented by a new `Preconditions` union with
-discriminant `type`. Values `PRECOND_NONE` and `PRECOND_TIME` are
-binary compatible with the current `timeBounds` field (which is of
-type `TimeBounds*`). Value `PRECOND_V2` is the new type of
-precondition. Note that `minSeqNum`, if non-NULL, relaxes the range
-of sequence numbers at which a transaction can be executed. However,
-after executing a transaction, `sourceAccount`'s sequence number is
-always set to the transaction's `seqNum`--like an implicit
-`BUMP_SEQUENCE` operation. This guarantees transactions cannot be
-replayed, even when the previous account `seqNum` is well below the
-transaction's `seqNum`. The final element of `Preconditions` is an
-array of extra signers required for the transaction. This can be used
-with `SIGNER_KEY_TYPE_HASH_X` to sign a transaction that can only be
-executed in exchange for disclosing a hash preimage.
-
-Note that a `TransactionV1Envelope` may contain at most 20 signatures.
-Any signatures required by the `extraSigners` field reside in the
-`TransactionV1Envelope` and hence must reside in the same 20 signature
-slots. As a consequence, a transaction that would require 20
-signatures without an `extraSigners` field generally cannot contain
-`extraSigners` unless the `extraSigners` are satisfied by the same
-signatures as the `sourceAccount`s.
+Preconditions are represented by a new `Preconditions` union with discriminant
+`type`. Values `PRECOND_NONE` and `PRECOND_TIME` are binary compatible with the
+current `timeBounds` field (which is of type `TimeBounds*`). Value `PRECOND_V2`
+is the new type of precondition. Note that `minSeqNum`, if non-NULL, relaxes
+the range of sequence numbers at which a transaction can be executed. However,
+after executing a transaction, `sourceAccount`'s sequence number is always set
+to the transaction's `seqNum`--like an implicit `BUMP_SEQUENCE` operation. This
+guarantees transactions cannot be replayed, even when the previous account
+`seqNum` is well below the transaction's `seqNum`. The final element of
+`Preconditions` is an array of extra signers required for the transaction. This
+can be used with `SIGNER_KEY_TYPE_HASH_X` to sign a transaction that can only
+be executed in exchange for disclosing a hash preimage.
+
+Note that a `TransactionV1Envelope` may contain at most 20 signatures. Any
+signatures required by the `extraSigners` field reside in the
+`TransactionV1Envelope` and hence must reside in the same 20 signature slots.
+As a consequence, a transaction that would require 20 signatures without an
+`extraSigners` field generally cannot contain `extraSigners` unless the
+`extraSigners` are satisfied by the same signatures as the `sourceAccount`s.
```c++
typedef uint64 Duration;
@@ -193,8 +187,8 @@ union Preconditions switch (PreconditionType type) {
Note we add an unsigned `Duration` type, used by `minSeqAge`.
-We make use of the new `Preconditions` type by replacing `timeBounds`
-in the `Transaction` structure as follows:
+We make use of the new `Preconditions` type by replacing `timeBounds` in the
+`Transaction` structure as follows:
```c++
struct Transaction
@@ -215,103 +209,97 @@ struct Transaction
};
```
-A transaction whose preconditions are not satisfied is
-_non-executable_. In most cases, non-executable transactions should
-not be included in blocks. However, it is possible that a prior
-transaction in the same block can turn a executable transaction into a
-non-executable one. If this happens, the non-executable transaction
-still incurs a fee and increments the `sourceAccount` sequence number.
-
-To minimize the presence of non-executable transactions in blocks, a
-block may not contain both a transaction with a non-zero
-`minSeqLedgerGap` or `minSeqAge` and one with a lower `seqNum` on the
-same `sourceAccount`. Unfortunately, this does not entirely eliminate
-the possibility of non-executable transactions in blocks; for
-instance, a `BUMP_SEQUENCE` operation in a transaction from a
-different `sourceAccount` can invalidate the `minSeqAge` or
-`minSeqLedgerGap` on another transaction.
+A transaction whose preconditions are not satisfied is _non-executable_. In
+most cases, non-executable transactions should not be included in blocks.
+However, it is possible that a prior transaction in the same block can turn an
+executable transaction into a non-executable one. If this happens, the
+non-executable transaction still incurs a fee and increments the
+`sourceAccount` sequence number.
+
+To minimize the presence of non-executable transactions in blocks, a block may
+not contain both a transaction with a non-zero `minSeqLedgerGap` or `minSeqAge`
+and one with a lower `seqNum` on the same `sourceAccount`. Unfortunately, this
+does not entirely eliminate the possibility of non-executable transactions in
+blocks; for instance, a `BUMP_SEQUENCE` operation in a transaction from a
+different `sourceAccount` can invalidate the `minSeqAge` or `minSeqLedgerGap`
+on another transaction.
### Transaction forwarding and ordering
-A transaction submitted to the network is valid only if it is part of
-a valid series of pending transactions on the same `sourceAccount`
-that can all be valid in the same block. For example, if a source
-account has `seqNum` 10, then a submitted transaction with `seqNum` 12
-and no preconditions is valid (and should be forwarded) only if there
-is also a pending valid transaction with sequence number 11. The
-`minSeqNum` field in this proposal relaxes validity to allow a valid
-series of transactions on the same `sourceAccount` with discontinuous
-`seqNum` fields. The gaps, however, cannot be filled in (if the queue
-already had `seqNum` 10 and 12 with a valid gap, 11 should not be
-accepted and forwarded). Regardless of these gaps, all transactions on the
-same `sourceAccount` in the same block must be executed in order of
-increasing `seqNum`. Hence, the presence of the `minSeqNum` field may
-make transactions valid that would not otherwise be valid, but cannot
-invalidate otherwise valid transactions, since lower `seqNum` fields
-always execute first before higher ones that would invalidate them.
+A transaction submitted to the network is valid only if it is part of a valid
+series of pending transactions on the same `sourceAccount` that can all be
+valid in the same block. For example, if a source account has `seqNum` 10, then
+a submitted transaction with `seqNum` 12 and no preconditions is valid (and
+should be forwarded) only if there is also a pending valid transaction with
+sequence number 11. The `minSeqNum` field in this proposal relaxes validity to
+allow a valid series of transactions on the same `sourceAccount` with
+discontinuous `seqNum` fields. The gaps, however, cannot be filled in (if the
+queue already had `seqNum` 10 and 12 with a valid gap, 11 should not be
+accepted and forwarded). Regardless of these gaps, all transactions on the same
+`sourceAccount` in the same block must be executed in order of increasing
+`seqNum`. Hence, the presence of the `minSeqNum` field may make transactions
+valid that would not otherwise be valid, but cannot invalidate otherwise valid
+transactions, since lower `seqNum` fields always execute first before higher
+ones that would invalidate them.
A transaction with a non-zero `minSeqAge` or `minSeqLedgerGap` must be
-discarded and not forwarded--as if its `minTime` has not yet
-arrived--if either A) the appropriate condition (`minSeqAge` or
-`minSeqLedgerGap`) does not yet hold, or B) there are pending valid
-transactions with lower sequence numbers on the same `sourceAccount`.
-Conversely, after receiving and forwarding a valid transaction with a
-non-zero `minSeqAge` or `minSeqLedgerGap`, subsequently received
-transactions with earlier sequence numbers must be discarded.
-However, a nominated block is valid so long as its transactions can be
-executed. This means a validator can vote for a block containing
-transactions that the validator would have discarded. For example,
-consider the following two transactions on the same `sourceAccount`
-which currently has `seqNum` 10:
-
-* T1 has `seqNum` 11 and no preconditions.
-* T2 has `seqNum` 12, `minSeqNum` 10, and `minSeqLedgerGap` 1.
-
-Any validator that receives both of these transactions will keep the
-first one and discard the second one that it receives. However, if a
-validator sees a nomination vote for a block that contains T2 but not
-T1, the validator will nonetheless vote for the block. The logic is
-identical to a situation in which T1 and T2 have the same sequence
-number and fee--validators seeing both will discard the second one
-that they receive.
+discarded and not forwarded--as if its `minTime` has not yet arrived--if either
+A) the appropriate condition (`minSeqAge` or `minSeqLedgerGap`) does not yet
+hold, or B) there are pending valid transactions with lower sequence numbers on
+the same `sourceAccount`. Conversely, after receiving and forwarding a valid
+transaction with a non-zero `minSeqAge` or `minSeqLedgerGap`, subsequently
+received transactions with earlier sequence numbers must be discarded. However,
+a nominated block is valid so long as its transactions can be executed. This
+means a validator can vote for a block containing transactions that the
+validator would have discarded. For example, consider the following two
+transactions on the same `sourceAccount` which currently has `seqNum` 10:
+
+- T1 has `seqNum` 11 and no preconditions.
+- T2 has `seqNum` 12, `minSeqNum` 10, and `minSeqLedgerGap` 1.
+
+Any validator that receives both of these transactions will keep the first one
+and discard the second one that it receives. However, if a validator sees a
+nomination vote for a block that contains T2 but not T1, the validator will
+nonetheless vote for the block. The logic is identical to a situation in which
+T1 and T2 have the same sequence number and fee--validators seeing both will
+discard the second one that they receive.
### Transaction validation
-The `Preconditions` in each transaction of a block are validated
-twice.
-
-The first validation occurs when checking that a block itself is valid
-and can be nominated by the consensus protocol. As part of the
-validation, for each `sourceAccount` there can be only one transaction
-with a given sequence number and the `seqNum` fields must either be
-consecutive or any gaps must be permitted by non-NULL `minSeqNum`
-fields. All but the transaction with the lowest `seqNum` on a given
-`sourceAccount` must have 0 for the `minSeqAge` and `minSeqLedgerGap`
-fields.
-
-Once a block is externalized by the consensus algorithm, the block is
-applied. Before executing any operations, fees are charged to the
-source account for all transactions. Once fees have been deducted
-from all accounts, transactions are one-by-one validated a second time
-then executed. It is possible for a previously valid transaction to
-fail the second validation, for instance if a `BUMP_SEQUENCE`
-operation made the sequence number invalid. Whenever a transaction
-fails validation during execution, the `sourceAccount` loses the fee.
+The `Preconditions` in each transaction of a block are validated twice.
+
+The first validation occurs when checking that a block itself is valid and can
+be nominated by the consensus protocol. As part of the validation, for each
+`sourceAccount` there can be only one transaction with a given sequence number
+and the `seqNum` fields must either be consecutive or any gaps must be
+permitted by non-NULL `minSeqNum` fields. All but the transaction with the
+lowest `seqNum` on a given `sourceAccount` must have 0 for the `minSeqAge` and
+`minSeqLedgerGap` fields.
+
+Once a block is externalized by the consensus algorithm, the block is applied.
+Before executing any operations, fees are charged to the source account for all
+transactions. Once fees have been deducted from all accounts, transactions are
+one-by-one validated a second time then executed. It is possible for a
+previously valid transaction to fail the second validation, for instance if a
+`BUMP_SEQUENCE` operation made the sequence number invalid. Whenever a
+transaction fails validation during execution, the `sourceAccount` loses the
+fee.
Because `PreconditionsV2` specifies multiple pre-conditions, there may be
multiple reasons why a transaction is invalid. If `extraSigners` contains
duplicate signers, the transaction is rejected with `txMALFORMED` (Note that
-signer overlap between `extraSigners` and `AccountEntry` signers is allowed). If
-the `maxTime` (inclusive) or `maxLedger` (exclusive) have not been satisfied--
-then the transaction is rejected with `TransactionResultCode` `txTOO_LATE`. If
-`minTime` or `minLedger` have not been reached yet, then the transaction is
-rejected with `txTOO_EARLY`. If `minSeqNum` is set, but the relaxed sequence
-number validation still fails, then the transaction is rejected with
-`txBAD_SEQ`. If the failure is due to `minSeqAge` or `minSeqLedgerGap`, then the
-transaction is rejected with the new `txBAD_MIN_SEQ_AGE_OR_GAP` error code. If
-none of the above conditions holds (`txTOO_EARLY`, `txBAD_MIN_SEQ_AGE_OR_GAP`,
-`txMALFORMED`, `txBAD_SEQ`, and `txTOO_LATE` do not apply), but one of the
-`extraSigners` is unsatisfied, then the transaction fails with `txBAD_AUTH`.
+signer overlap between `extraSigners` and `AccountEntry` signers is allowed).
+If the `maxTime` (inclusive) or `maxLedger` (exclusive) have not been
+satisfied--then the transaction is rejected with `TransactionResultCode`
+`txTOO_LATE`. If `minTime` or `minLedger` have not been reached yet, then the
+transaction is rejected with `txTOO_EARLY`. If `minSeqNum` is set, but the
+relaxed sequence number validation still fails, then the transaction is
+rejected with `txBAD_SEQ`. If the failure is due to `minSeqAge` or
+`minSeqLedgerGap`, then the transaction is rejected with the new
+`txBAD_MIN_SEQ_AGE_OR_GAP` error code. If none of the above conditions holds
+(`txTOO_EARLY`, `txBAD_MIN_SEQ_AGE_OR_GAP`, `txMALFORMED`, `txBAD_SEQ`, and
+`txTOO_LATE` do not apply), but one of the `extraSigners` is unsatisfied, then
+the transaction fails with `txBAD_AUTH`.
### XDR diff
@@ -327,11 +315,11 @@ index c870fe09..5c772f1c 100644
+typedef uint64 Duration;
typedef opaque DataValue<64>;
typedef Hash PoolID; // SHA256(LiquidityPoolParameters)
-
+
@@ -133,6 +134,19 @@ const MAX_SIGNERS = 20;
-
+
typedef AccountID* SponsorshipDescriptor;
-
+
+struct AccountEntryExtensionV3
+{
+ // We can use this to add more fields, or because it is first, to
@@ -364,7 +352,7 @@ index 1a4e491a..811e4786 100644
@@ -576,6 +576,58 @@ struct TimeBounds
TimePoint maxTime; // 0 here means no maxTime
};
-
+
+struct LedgerBounds
+{
+ uint32 minLedger;
@@ -419,20 +407,20 @@ index 1a4e491a..811e4786 100644
+
// maximum number of operations per transaction
const MAX_OPS_PER_TX = 100;
-
+
@@ -627,8 +679,8 @@ struct Transaction
// sequence number to consume in the account
SequenceNumber seqNum;
-
+
- // validity range (inclusive) for the last ledger close time
- TimeBounds* timeBounds;
+ // validity conditions
+ Preconditions cond;
-
+
Memo memo;
-
+
@@ -1508,7 +1560,9 @@ enum TransactionResultCode
-
+
txNOT_SUPPORTED = -12, // transaction type not supported
txFEE_BUMP_INNER_FAILED = -13, // fee bump inner transaction failed
- txBAD_SPONSORSHIP = -14 // sponsorship not confirmed
@@ -440,7 +428,7 @@ index 1a4e491a..811e4786 100644
+ txBAD_MIN_SEQ_AGE_OR_GAP = -15, //minSeqAge or minSeqLedgerGap conditions not met
+ txMALFORMED = 16 // precondition is invalid
};
-
+
// InnerTransactionResult must be binary compatible with TransactionResult
@@ -1537,6 +1591,8 @@ struct InnerTransactionResult
case txNOT_SUPPORTED:
@@ -458,7 +446,7 @@ index 8f7d5c20..caa41d7f 100644
@@ -14,6 +14,14 @@ typedef int int32;
typedef unsigned hyper uint64;
typedef hyper int64;
-
+
+// An ExtensionPoint is always marshaled as a 32-bit 0 value. At a
+// later point, it can be replaced by a different union so as to
+// extend a structure.
@@ -474,396 +462,359 @@ index 8f7d5c20..caa41d7f 100644
## Design Rationale
-Relative timelocks are a known mechanism for simplifying payment
-channels, implemented by Bitcoin and used in lightning payment
-channels. Stellar's lack of UTXOs combined with transaction sequence
-numbers make payment channels harder to implement. This proposal
-rectifies the problem in a way that is not too hard to implement in
-stellar-core and provides a good degree of backwards compatibility.
-
-Fundamentally, a payment channel requires a way to enforce a time
-separation between declaring that one wants to execute a pre-signed
-transaction T and actually executing T. Furthermore, between the
-declaration and execution, other parties need a chance to object and
-invalidate T if there is a later T' superseding T. The relative
-timelock provides this separation, while the relaxing of sequence
-numbers makes it easy to object by pre-signing a transaction
-invalidating T that can be submitted at a variety of sequence numbers.
-Without such a mechanism, multiple auxiliary accounts are required.
-
-An earlier version of the proposal did not contain the
-`minSeqLedgerGap` field. However, members of the payment channel
-working group were concerned that the network could, in a worst-case
-scenario, experience downtime right after someone incorrectly closes a
-payment channel, precluding the other party from correcting the
-problem. `minSeqLedgerGap` guarantees that there will be an
-opportunity to correct the problem when the network comes back up,
-because the pre-signed transaction with a `minSeqLedgerGap` will still
-not be immediately executable.
-
-It's worth asking whether we need `minSeqAge` if we have
-`minSeqLedgerGap`. One reason to keep it is that, under heavy load,
-the network could start processing ledgers faster than once every 5
-seconds. This might happen after periods of downtime.
-
-One possible efficiency problem is that transactions with a
-`minSeqAge` or `minSeqLedgerGap` cannot be pipelined behind other
-transactions on the same `sourceAccount`. Though this might seem to
-reduce efficiency, in fact such time-delayed transactions are intended
-to be delayed for some "disclosure period" during which the account
-remains idle. Typically such time-delayed transactions are intended
-to correct an abnormal situation (e.g., one end of a payment channel
-failing, or an account owner losing the key) and so don't actually get
-submitted in the common case.
+Relative timelocks are a known mechanism for simplifying payment channels,
+implemented by Bitcoin and used in lightning payment channels. Stellar's lack
+of UTXOs combined with transaction sequence numbers make payment channels
+harder to implement. This proposal rectifies the problem in a way that is not
+too hard to implement in stellar-core and provides a good degree of backwards
+compatibility.
+
+Fundamentally, a payment channel requires a way to enforce a time separation
+between declaring that one wants to execute a pre-signed transaction T and
+actually executing T. Furthermore, between the declaration and execution, other
+parties need a chance to object and invalidate T if there is a later T'
+superseding T. The relative timelock provides this separation, while the
+relaxing of sequence numbers makes it easy to object by pre-signing a
+transaction invalidating T that can be submitted at a variety of sequence
+numbers. Without such a mechanism, multiple auxiliary accounts are required.
+
+An earlier version of the proposal did not contain the `minSeqLedgerGap` field.
+However, members of the payment channel working group were concerned that the
+network could, in a worst-case scenario, experience downtime right after
+someone incorrectly closes a payment channel, precluding the other party from
+correcting the problem. `minSeqLedgerGap` guarantees that there will be an
+opportunity to correct the problem when the network comes back up, because the
+pre-signed transaction with a `minSeqLedgerGap` will still not be immediately
+executable.
+
+It's worth asking whether we need `minSeqAge` if we have `minSeqLedgerGap`. One
+reason to keep it is that, under heavy load, the network could start processing
+ledgers faster than once every 5 seconds. This might happen after periods of
+downtime.
+
+One possible efficiency problem is that transactions with a `minSeqAge` or
+`minSeqLedgerGap` cannot be pipelined behind other transactions on the same
+`sourceAccount`. Though this might seem to reduce efficiency, in fact such
+time-delayed transactions are intended to be delayed for some "disclosure
+period" during which the account remains idle. Typically such time-delayed
+transactions are intended to correct an abnormal situation (e.g., one end of a
+payment channel failing, or an account owner losing the key) and so don't
+actually get submitted in the common case.
### Two-way payment channel
-The proposed mechanism can be used to implement a payment channel
-between two parties, an initiator I and a responder R. The protocol
-assumes some _synchrony period_, S, such that both parties are
-guaranteed to be able to observe the blockchain state and submit
-transactions within any period of length S.
-
-The payment channel consists of a 2-of-2 multisig escrow account E,
-initially created and configured by I, and a series of pairs of
-_declaration_ and _closing_ transactions on E signed by both parties.
-The two parties maintain the following two variables during the
-lifetime of the channel:
-
-* s - the _starting sequence number_, is initialized to one greater
- than the sequence number of the escrow account E after E has been
- created and configured. It is increased only when withdrawing from
- or topping up the escrow account E.
-
-* i - the _iteration number_ of the payment channel, is initialized to
- (s/2)+1. It is incremented with every off-chain update of the
- payment channel state.
-
-To update the payment channel state, the parties 1) increment i, 2)
-sign and exchange a closing transaction C_i, and finally 3) sign and
-exchange a declaration transaction D_i. The transactions are
-constructed as follows:
-
-* D_i, the _declaration transaction_, declares an intent to execute
- the corresponding closing transaction C_i. D_i has source account
- E, sequence number 2i, and `minSeqNum` set to s. Hence, D_i can
- execute at any time, so long as E's sequence number n satisfies s <=
- n < 2i. D_i always leaves E's sequence number at 2i after
- executing. Because C_i has source account E and sequence number
- 2i+1, D_i leaves E in a state where C_i can execute. Note that D_i
- does not require any operations, but since Stellar disallows empty
+The proposed mechanism can be used to implement a payment channel between two
+parties, an initiator I and a responder R. The protocol assumes some _synchrony
+period_, S, such that both parties are guaranteed to be able to observe the
+blockchain state and submit transactions within any period of length S.
+
+The payment channel consists of a 2-of-2 multisig escrow account E, initially
+created and configured by I, and a series of pairs of _declaration_ and
+_closing_ transactions on E signed by both parties. The two parties maintain
+the following two variables during the lifetime of the channel:
+
+- s - the _starting sequence number_, is initialized to one greater than the
+ sequence number of the escrow account E after E has been created and
+ configured. It is increased only when withdrawing from or topping up the
+ escrow account E.
+
+- i - the _iteration number_ of the payment channel, is initialized to (s/2)+1.
+ It is incremented with every off-chain update of the payment channel state.
+
+To update the payment channel state, the parties 1) increment i, 2) sign and
+exchange a closing transaction C_i, and finally 3) sign and exchange a
+declaration transaction D_i. The transactions are constructed as follows:
+
+- D*i, the \_declaration transaction*, declares an intent to execute the
+ corresponding closing transaction C_i. D_i has source account E, sequence
+ number 2i, and `minSeqNum` set to s. Hence, D_i can execute at any time, so
+ long as E's sequence number n satisfies s <= n < 2i. D_i always leaves E's
+ sequence number at 2i after executing. Because C_i has source account E and
+ sequence number 2i+1, D_i leaves E in a state where C_i can execute. Note
+ that D_i does not require any operations, but since Stellar disallows empty
transactions, it contains a `BUMP_SEQUENCE` operation as a no-op.
-* C_i, the _closing transaction_, disburses funds to R and changes the
- signing weights on E such that I unilaterally controls E. C_i has
- source account E, sequence number 2i+1, and a `minSeqAge` of S (the
- synchrony period). The `minSeqAge` prevents a misbehaving party
- from executing C_i when the channel state has already progressed to
- a later iteration number, as the other party can always invalidate
- C_i by submitting D_i' for some i' > i. C_i contains one or more
- `CREATE_CLAIMABLE_BALANCE` operations disbursing funds to R, plus a
- `SET_OPTIONS` operation adjusting signing weights to give I full
- control of E.
-
-For R to top-up or withdraw excess funds from the escrow account E,
-the participants skip a generation. They set s = 2(i+1), and i = i+2.
-They then exchange C_i and D_i (which unlike the update case, can be
-exchanged in a single phase of communication because D_i is not yet
-executable while E's sequence number is below the new s). Finally,
-they create a top-up transaction (on some source account other than E,
-in case it fails) that atomically adjusts E's balance and uses
-`BUMP_SEQUENCE` to increase E's sequence number to s.
-
-To close the channel cooperatively, the parties re-sign C_i with a
-`minSeqNum` of s and a `minSeqAge` of 0, then submit this transaction.
+- C*i, the \_closing transaction*, disburses funds to R and changes the signing
+ weights on E such that I unilaterally controls E. C_i has source account E,
+ sequence number 2i+1, and a `minSeqAge` of S (the synchrony period). The
+ `minSeqAge` prevents a misbehaving party from executing C_i when the channel
+ state has already progressed to a later iteration number, as the other party
+ can always invalidate C_i by submitting D_i' for some i' > i. C_i contains
+ one or more `CREATE_CLAIMABLE_BALANCE` operations disbursing funds to R, plus
+ a `SET_OPTIONS` operation adjusting signing weights to give I full control of
+ E.
+
+For R to top-up or withdraw excess funds from the escrow account E, the
+participants skip a generation. They set s = 2(i+1), and i = i+2. They then
+exchange C_i and D_i (which unlike the update case, can be exchanged in a
+single phase of communication because D_i is not yet executable while E's
+sequence number is below the new s). Finally, they create a top-up transaction
+(on some source account other than E, in case it fails) that atomically adjusts
+E's balance and uses `BUMP_SEQUENCE` to increase E's sequence number to s.
+
+To close the channel cooperatively, the parties re-sign C_i with a `minSeqNum`
+of s and a `minSeqAge` of 0, then submit this transaction.
### Two-way payment channel supporting uncoordinated deposits
-The proposed mechanism can be used to implement a payment channel
-between two parties, an initiator I and a responder R. The protocol
-assumes some _synchrony period_, S, such that both parties are
-guaranteed to be able to observe the blockchain state and submit
-transactions within any period of length S.
+The proposed mechanism can be used to implement a payment channel between two
+parties, an initiator I and a responder R. The protocol assumes some _synchrony
+period_, S, such that both parties are guaranteed to be able to observe the
+blockchain state and submit transactions within any period of length S.
The payment channel consists of two 2-of-2 multisig escrow accounts:
-* EI - created and configured by I, holding all amounts contributed by
- I.
-
-* ER - created and configured by R, holding all amounts contributed by
- R.
-
-The payment channel updates state using a series of _declaration_ and
-_closing_ transactions with EI as the source account. The two parties
-maintain the following two variables during the lifetime of the
-channel:
-
-* s - the _starting sequence number_, is initialized to one greater
- than the sequence number of the escrow account EI after EI has been
- created and configured. It is increased only when withdrawing.
-
-* i - the _iteration number_ of the payment channel, is initialized to
- (s/2)+1. It is incremented with every off-chain update of the
- payment channel state.
-
-To update the payment channel state, the parties 1) increment i, 2)
-sign and exchange a closing transaction C_i, and finally 3) sign and
-exchange a declaration transaction D_i. The transactions are
-constructed as follows:
-
-* D_i, the _declaration transaction_, declares an intent to execute
- the corresponding closing transaction C_i. D_i has source account
- EI, sequence number 2i, and `minSeqNum` set to s. Hence, D_i can
- execute at any time, so long as EI's sequence number n satisfies s <=
- n < 2i. D_i always leaves EI's sequence number at 2i after
- executing. Because C_i has source account EI and sequence number
- 2i+1, D_i leaves EI in a state where C_i can execute. Note that D_i
- does not require any operations, but since Stellar disallows empty
+- EI - created and configured by I, holding all amounts contributed by I.
+
+- ER - created and configured by R, holding all amounts contributed by R.
+
+The payment channel updates state using a series of _declaration_ and _closing_
+transactions with EI as the source account. The two parties maintain the
+following two variables during the lifetime of the channel:
+
+- s - the _starting sequence number_, is initialized to one greater than the
+ sequence number of the escrow account EI after EI has been created and
+ configured. It is increased only when withdrawing.
+
+- i - the _iteration number_ of the payment channel, is initialized to (s/2)+1.
+ It is incremented with every off-chain update of the payment channel state.
+
+To update the payment channel state, the parties 1) increment i, 2) sign and
+exchange a closing transaction C_i, and finally 3) sign and exchange a
+declaration transaction D_i. The transactions are constructed as follows:
+
+- D*i, the \_declaration transaction*, declares an intent to execute the
+ corresponding closing transaction C_i. D_i has source account EI, sequence
+ number 2i, and `minSeqNum` set to s. Hence, D_i can execute at any time, so
+ long as EI's sequence number n satisfies s <= n < 2i. D_i always leaves EI's
+ sequence number at 2i after executing. Because C_i has source account EI and
+ sequence number 2i+1, D_i leaves EI in a state where C_i can execute. Note
+ that D_i does not require any operations, but since Stellar disallows empty
transactions, it contains a `BUMP_SEQUENCE` operation as a no-op.
-* C_i, the _closing transaction_, disburses funds from EI to ER,
- and/or from ER to EI such that the balances of the escrow accounts
- match the final agreed state of the channel at the time C_i is
- generated. C_i also changes the signing weights on EI and ER such
- that I unilaterally controls EI and R unilaterally controls ER. C_i
- has source account EI, sequence number 2i+1, and a `minSeqAge` of S
- (the synchrony period). The `minSeqAge` prevents a misbehaving
- party from executing C_i when the channel state has already
- progressed to a later iteration number, as the other party can
- always invalidate C_i by submitting D_i' for some i' > i. C_i
- contains one or more `PAYMENT` operations disbursing funds between
- escrow accounts, plus `SET_OPTIONS` operations adjusting signing
- weights of each escrow account.
-
-I and R may top-up their respective escrow accounts by making a
-payment into them directly.
-
-I and R may adjust the relative balances of EI and ER as well as
-withdraw excess funds from these accounts by skipping a
-generation. They set s = 2(i+1), and i = i+2. They then exchange C_i
-and D_i (which unlike the update case, can be exchanged in a single
-phase of communication because D_i is not yet executable while EI's
-sequence number is below the new s). Finally, they create a withdraw
-transaction that atomically shifts funds between EI and ER, withdraws
-any desired excess funds with `CREATE_CLAIMABLE_BALANCE`, and uses
+- C*i, the \_closing transaction*, disburses funds from EI to ER, and/or from
+ ER to EI such that the balances of the escrow accounts match the final agreed
+ state of the channel at the time C_i is generated. C_i also changes the
+ signing weights on EI and ER such that I unilaterally controls EI and R
+ unilaterally controls ER. C_i has source account EI, sequence number 2i+1,
+ and a `minSeqAge` of S (the synchrony period). The `minSeqAge` prevents a
+ misbehaving party from executing C_i when the channel state has already
+ progressed to a later iteration number, as the other party can always
+ invalidate C_i by submitting D_i' for some i' > i. C_i contains one or more
+ `PAYMENT` operations disbursing funds between escrow accounts, plus
+ `SET_OPTIONS` operations adjusting signing weights of each escrow account.
+
+I and R may top-up their respective escrow accounts by making a payment into
+them directly.
+
+I and R may adjust the relative balances of EI and ER as well as withdraw
+excess funds from these accounts by skipping a generation. They set s = 2(i+1),
+and i = i+2. They then exchange C_i and D_i (which unlike the update case, can
+be exchanged in a single phase of communication because D_i is not yet
+executable while EI's sequence number is below the new s). Finally, they create
+a withdraw transaction that atomically shifts funds between EI and ER,
+withdraws any desired excess funds with `CREATE_CLAIMABLE_BALANCE`, and uses
`BUMP_SEQUENCE` to increase EI's sequence number to s.
-To close the channel cooperatively, the parties re-sign C_i with a
-`minSeqNum` of s and a `minSeqAge` of 0, then submit this transaction.
+To close the channel cooperatively, the parties re-sign C_i with a `minSeqNum`
+of s and a `minSeqAge` of 0, then submit this transaction.
### One-way payment channel
-A one-way payment channel enables an initiator I to make repeated
-payments to a recipient R. Unlike the two-way payment channel, I can
-unilaterally set up the payment channel without R's cooperation.
-Moreover, R can unilaterally withdraw funds from the payment channel
-at any point with no close delay.
-
-The channel consists of a an escrow account E, initially created by I.
-Let s be E's sequence number after it has been created and configured.
-Define the following transactions with source account E:
-
-* D, the _disclosure transaction_, has sequence number s+1 and a
- vacuous `BUMP_SEQUENCE` operation.
-
-* C_i, version i of the _closing transaction_, has sequence number
- s+2. It disburses funds to R through one or more
- `CREATE_CLAIMABLE_BALANCE` operations, and uses `SET_OPTIONS` to
- increase I's signing weight to 2. Each C_i disburses more funds to
- R than C_{i-1}. Only one C_i can execute since they all have the
- same sequence number.
-
-* F, the _fault-recovery transaction_, allows I to recover E in case R
- fails. It has sequence number s+2, a `minSeqAge` of S (some
- synchrony period), and gives I signing weight 2 on the account.
-
-After adding appropriate trustlines and funding the escrow account E,
-I issues a transaction configuring E to have signing threshold 2 (for
-low, medium, and high) and to have the following signers all with
-weight 1: I, R, D, and F (the latter two as
-`SIGNER_KEY_TYPE_PRE_AUTH_TX`).
-
-To submit series of payments, I sends R successive C_i transactions
-each of which reflects the cumulative sum of all previous payments. R
-accepts these so long as E has a sufficient balance. To close the
-channel, R submits D and C_i. If R fails, I can close the channel by
-submitting D, waiting S time, and then submitting F.
+A one-way payment channel enables an initiator I to make repeated payments to a
+recipient R. Unlike the two-way payment channel, I can unilaterally set up the
+payment channel without R's cooperation. Moreover, R can unilaterally withdraw
+funds from the payment channel at any point with no close delay.
+
+The channel consists of an escrow account E, initially created by I. Let s be
+E's sequence number after it has been created and configured. Define the
+following transactions with source account E:
+
+- D, the _disclosure transaction_, has sequence number s+1 and a vacuous
+ `BUMP_SEQUENCE` operation.
+
+- C*i, version i of the \_closing transaction*, has sequence number s+2. It
+ disburses funds to R through one or more `CREATE_CLAIMABLE_BALANCE`
+ operations, and uses `SET_OPTIONS` to increase I's signing weight to 2. Each
+ C*i disburses more funds to R than C*{i-1}. Only one C_i can execute since
+ they all have the same sequence number.
+
+- F, the _fault-recovery transaction_, allows I to recover E in case R fails.
+ It has sequence number s+2, a `minSeqAge` of S (some synchrony period), and
+ gives I signing weight 2 on the account.
+
+After adding appropriate trustlines and funding the escrow account E, I issues
+a transaction configuring E to have signing threshold 2 (for low, medium, and
+high) and to have the following signers all with weight 1: I, R, D, and F (the
+latter two as `SIGNER_KEY_TYPE_PRE_AUTH_TX`).
+
+To submit a series of payments, I sends R successive C_i transactions each of
+which reflects the cumulative sum of all previous payments. R accepts these so
+long as E has a sufficient balance. To close the channel, R submits D and C_i.
+If R fails, I can close the channel by submitting D, waiting S time, and then
+submitting F.
### Hash Time Locked Contract (HTLC)
HTLCs are a key building block for many blockchain protocols such as
-cross-chain atomic swaps and payment channels. An HTLC is a
-transaction _T_ characterized by two values: a hash _h_ and an
-expiration time _t_. Before the expiration time, anyone who knows the
-hash preimage of _h_ can execute _T_ in exchange for disclosing that
-preimage. Typically, disclosing the preimage unlocks a different
-transaction on the same or a different blockchain.
+cross-chain atomic swaps and payment channels. An HTLC is a transaction _T_
+characterized by two values: a hash _h_ and an expiration time _t_. Before the
+expiration time, anyone who knows the hash preimage of _h_ can execute _T_ in
+exchange for disclosing that preimage. Typically, disclosing the preimage
+unlocks a different transaction on the same or a different blockchain.
-To make a transaction into an HTLC, the following preconditions should
-be set:
+To make a transaction into an HTLC, the following preconditions should be set:
-* `timeBounds->maxTime` should be set to the expiration time _t_.
+- `timeBounds->maxTime` should be set to the expiration time _t_.
-* `extraSigners[0]` should be set to a `SIGNER_KEY_TYPE_HASH_X` with
- the hash value _h_.
+- `extraSigners[0]` should be set to a `SIGNER_KEY_TYPE_HASH_X` with the hash
+ value _h_.
-Note that the maximum size of a hash pre-image on Stellar is 64
-bytes. On Bitcoin, a hash preimage could potentially be up to 520
-bytes. Hence, when pairing Stellar HTLCs with transactions on other
-blockchains for cross-chain operation, care must be taken to ensure
-that the other blockchain does not accept preimages larger than 64
-bytes. Otherwise, a larger preimage disclosed on another blockchain
-would fail to unlock an HTLC on Stellar.
+Note that the maximum size of a hash pre-image on Stellar is 64 bytes. On
+Bitcoin, a hash preimage could potentially be up to 520 bytes. Hence, when
+pairing Stellar HTLCs with transactions on other blockchains for cross-chain
+operation, care must be taken to ensure that the other blockchain does not
+accept preimages larger than 64 bytes. Otherwise, a larger preimage disclosed
+on another blockchain would fail to unlock an HTLC on Stellar.
### Key recovery
-The owner of account A may wish for a friend with key K to gain access
-to A in the event that the owner loses her keys, but not
-otherwise. This scenario can be accommodated with pre-authorized
-transactions as follows.
+The owner of account A may wish for a friend with key K to gain access to A in
+the event that the owner loses her keys, but not otherwise. This scenario can
+be accommodated with pre-authorized transactions as follows.
-Let s be a sequence number much higher than any that will be used in
-the future on A (e.g., A's current sequence number plus 2^{32}). The
-owner constructs the following 2 transactions:
+Let s be a sequence number much higher than any that will be used in the future
+on A (e.g., A's current sequence number plus 2^{32}). The owner constructs the
+following 2 transactions:
-* The _recovery transaction_ T_R has source account A, sequence number
- s+1, and `minSeqAge` one week. It contains a `SET_OPTIONS`
- operation giving K signing weight on A.
+- The _recovery transaction_ T_R has source account A, sequence number s+1, and
+ `minSeqAge` one week. It contains a `SET_OPTIONS` operation giving K signing
+ weight on A.
-* The _declaration transaction_ T_D has source account A, sequence
- number s, and `minSeqNum` 0. It doesn't need to contain any
- operations, but since Stellar requires at least one operation per
- transaction, it contains a `BUMP_SEQUENCE` as a no-op.
+- The _declaration transaction_ T_D has source account A, sequence number s,
+ and `minSeqNum` 0. It doesn't need to contain any operations, but since
+ Stellar requires at least one operation per transaction, it contains a
+ `BUMP_SEQUENCE` as a no-op.
-The owner of A signs T_R and T_D, and gives them to the friend for
-safe keeping. If the owner loses her keys, the friend submits T_D,
-then a week later submits T_R, and finally uses key K to help the user
-recover her funds.
+The owner of A signs T_R and T_D, and gives them to the friend for safe
+keeping. If the owner loses her keys, the friend submits T_D, then a week later
+submits T_R, and finally uses key K to help the user recover her funds.
-If T_D and K are ever compromised and an attacker unexpectedly submits
-T_D, then the user simply submits any transaction on A to consume
-sequence number s+1 and invalidate T_R.
+If T_D and K are ever compromised and an attacker unexpectedly submits T_D,
+then the user simply submits any transaction on A to consume sequence number
+s+1 and invalidate T_R.
### Parallel transaction submission
-A farm of 100 servers is constantly submitting transactions on the
-same source account, and wishes to coordinate use of sequence numbers.
-This can be achieved by having server number N always submit
-transactions with sequence numbers congruent to N modulo 100. Sending
-the transaction at s with `minSeqNum` s-99 ensures that if any of the
-servers do not submit transactions, the gap will not prevent other
-transactions from executing.
+A farm of 100 servers is constantly submitting transactions on the same source
+account, and wishes to coordinate use of sequence numbers. This can be achieved
+by having server number N always submit transactions with sequence numbers
+congruent to N modulo 100. Sending the transaction at s with `minSeqNum` s-99
+ensures that if any of the servers do not submit transactions, the gap will not
+prevent other transactions from executing.
### Deterministic account sequence numbers at creation
-The proposed `ledgerBounds` field can be used to create an account with
-a predictable sequence number that is guaranteed if the account creation
+The proposed `ledgerBounds` field can be used to create an account with a
+predictable sequence number that is guaranteed if the account creation
succeeds.
-Assuming the user plans to create the account between ledgers 0 and N,
-they can specify `ledgerBounds` as 0 and N + 1, and include a
-`BUMP_SEQUENCE` operation that bumps the sequence of the created account
-to N<<32. The transaction will be guaranteed to only succeed with the
-created account having a sequence number of N<<32.
+Assuming the user plans to create the account between ledgers 0 and N, they can
+specify `ledgerBounds` as 0 and N + 1, and include a `BUMP_SEQUENCE` operation
+that bumps the sequence of the created account to N<<32. The transaction will
+be guaranteed to only succeed with the created account having a sequence number
+of N<<32.
The sequence number is guaranteed because the account is created with a
sequence number derived from the current ledger's sequence number. The
`BUMP_SEQUENCE` operation is a no-op if the account's sequence number is
-greater than the `bumpTo` sequence number. The `ledgerBounds` restricts
-the creation to occur only up to the `bumpTo` to ensure that creation
-results with the account having the determined sequence number.
+greater than the `bumpTo` sequence number. The `ledgerBounds` restricts the
+creation to occur only up to the `bumpTo` to ensure that creation results with
+the account having the determined sequence number.
It is also possible to eliminate the `BUMP_SEQUENCE` operation from the
-transaction is a subsequent transaction uses `minSeqNum` with a value
-matching the `minLedger` of `ledgerBounds`.
+transaction if a subsequent transaction uses `minSeqNum` with a value matching
+the `minLedger` of `ledgerBounds`.
This property makes it possible to setup contracts using pre-authorized
-transactions where the pre-authorized transaction has the created
-account as its source account.
+transactions where the pre-authorized transaction has the created account as
+its source account.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-Previously signed transactions containing time points greater than
-2^{63} are no longer valid with this proposal. However, given that
-the number 0 already represents no time bounds, this is unlikely to
-cause problems in practice.
+Previously signed transactions containing time points greater than 2^{63} are
+no longer valid with this proposal. However, given that the number 0 already
+represents no time bounds, this is unlikely to cause problems in practice.
-The binary XDR of any other previously valid transactions will unmarshal
-to a valid transaction under the current proposal. Obviously legacy
-software will not be able to parse transactions with the new
-preconditions, however.
+The binary XDR of any other previously valid transactions will unmarshal to a
+valid transaction under the current proposal. Obviously legacy software will
+not be able to parse transactions with the new preconditions, however.
### Resource Utilization
-Transaction sizes will increase nominally, but only for transactions
-that use the new preconditions.
+Transaction sizes will increase nominally, but only for transactions that use
+the new preconditions.
-All account ledger entries will increase in size nominally with the
-addition of the account extension.
+All account ledger entries will increase in size nominally with the addition of
+the account extension.
-The maximum number of signatures that must be verified for each
-transaction will not change.
+The maximum number of signatures that must be verified for each transaction
+will not change.
-The introduction of `extraSigners` makes the use of
-`SIGNER_KEY_TYPE_HASH_X` signers more efficient by making them
-stateless, moving them from the account signers to the transaction.
-For any use cases utilizing this signer this may reduce the number of
-ledger entries and reduce the number of transactions since there would
-be no transactions to setup a `SIGNER_KEY_TYPE_HASH_X` signer before
-its use, and no transactions to remove it after its use.
+The introduction of `extraSigners` makes the use of `SIGNER_KEY_TYPE_HASH_X`
+signers more efficient by making them stateless, moving them from the account
+signers to the transaction. For any use cases utilizing this signer this may
+reduce the number of ledger entries and reduce the number of transactions since
+there would be no transactions to set up a `SIGNER_KEY_TYPE_HASH_X` signer
+before its use, and no transactions to remove it after its use.
## Security Concerns
-The security concerns stem primarily from new types of transaction
-making use of the new features. As such, the new preconditions,
-particularly `minSeqNum`, should make pre-signed transactions less
-brittle and simplify protocols. Nonetheless, there is still a lot of
-room for error in protocols.
-
-The fact that `BUMP_SEQUENCE` operations are executed after all
-transactions have been validated leads to a counterintuitive situation
-in which two operations can execute in the same block although both
-may not succeed. This is because the `BUMP_SEQUENCE` can affect is
-the `seqNum` attribute of the `AccountEntry`. This proposal introduces
-two new attributes that may be affected, `seqAge` and `seqLedgerGap`.
-Changes in any of these fields as an effect of a `BUMP_SEQUENCE` may
-cause other transactions that passed validation to fail during apply.
+The security concerns stem primarily from new types of transaction making use
+of the new features. As such, the new preconditions, particularly `minSeqNum`,
+should make pre-signed transactions less brittle and simplify protocols.
+Nonetheless, there is still a lot of room for error in protocols.
+
+The fact that `BUMP_SEQUENCE` operations are executed after all transactions
+have been validated leads to a counterintuitive situation in which two
+operations can execute in the same block although both may not succeed. This is
+because all that `BUMP_SEQUENCE` can affect is the `seqNum` attribute of the
+`AccountEntry`. This proposal introduces two new attributes that may be
+affected, `seqAge` and `seqLedgerGap`. Changes in any of these fields as an
+effect of a `BUMP_SEQUENCE` may cause other transactions that passed validation
+to fail during apply.
An example of this is if a source account has a valid transaction with
-`minSeqAge` or `minSeqLedgerGap` and a second transaction, containing
-a `BUMP_SEQUENCE` that bumps the sequence of the source account, is
-created that is also valid. Any protocol that does this risks both
-transactions being accepted as valid in the same ledger. If both
-transactions execute in the same ledger and the bump sequence
-transaction is executed first, the other transaction will fail as its
-`minSeqAge` or `minSeqLedgerGap` will no longer be satisfied.
-
-Any protocol that specifies for a source account a transaction with
-`minSeqAge` or `minSeqLedgerGap`, should not allow another transaction
-to be valid at the same moment unless the intent of that other
-transaction is to cause the first to fail or become invalid. Any
-transaction that is valid in the same moment as a transaction with
-`minSeqAge` or `minSeqLedgerGap` can cause the transaction to fail
-during execution even if it passed validation. Once the transaction
-has failed during execution it cannot be executed again as its
-sequence number will have been consumed.
-
-Fortunately, it appears that in most useful protocols time-delayed
-"closing" transactions use a NULL `minSeqNum`, while transactions with
-non-NULL `minSeqNum` are "disclosure" transactions intended to be
-valid at any time.
-
-The design rationale includes several multi-party protocols that
-require all parties to sign a transaction for it to be valid.
-This section does not discuss all possible security concerns with
-these protocols. It is at least worth noting that like most
-multi-party protocols there exists a period of time where a
-free-option may exist, where one party has authorized a transaction
-and another party can wait some period of time to decide if they
-also will authorize it, or fallback to some previously valid
-transaction.
+`minSeqAge` or `minSeqLedgerGap` and a second transaction, containing a
+`BUMP_SEQUENCE` that bumps the sequence of the source account, is created that
+is also valid. Any protocol that does this risks both transactions being
+accepted as valid in the same ledger. If both transactions execute in the same
+ledger and the bump sequence transaction is executed first, the other
+transaction will fail as its `minSeqAge` or `minSeqLedgerGap` will no longer be
+satisfied.
+
+Any protocol that specifies for a source account a transaction with `minSeqAge`
+or `minSeqLedgerGap`, should not allow another transaction to be valid at the
+same moment unless the intent of that other transaction is to cause the first
+to fail or become invalid. Any transaction that is valid in the same moment as
+a transaction with `minSeqAge` or `minSeqLedgerGap` can cause the transaction
+to fail during execution even if it passed validation. Once the transaction has
+failed during execution it cannot be executed again as its sequence number will
+have been consumed.
+
+Fortunately, it appears that in most useful protocols time-delayed "closing"
+transactions use a NULL `minSeqNum`, while transactions with non-NULL
+`minSeqNum` are "disclosure" transactions intended to be valid at any time.
+
+The design rationale includes several multi-party protocols that require all
+parties to sign a transaction for it to be valid. This section does not discuss
+all possible security concerns with these protocols. It is at least worth
+noting that like most multi-party protocols there exists a period of time where
+a free-option may exist, where one party has authorized a transaction and
+another party can wait some period of time to decide if they also will
+authorize it, or fallback to some previously valid transaction.
## Test Cases
diff --git a/core/cap-0022.md b/core/cap-0022.md
index f57aacb1e..83b2fe543 100644
--- a/core/cap-0022.md
+++ b/core/cap-0022.md
@@ -12,164 +12,144 @@ Protocol version: TBD
## Simple Summary
-Invalid transactions lack sufficient signatures for proper
-authorization, have sequence numbers that have already been used, or
-have invalid time bounds. Certain edge conditions cause invalid
-transactions to be included in the transaction set produced by
-consensus. Currently, Stellar executes and fails invalid transactions
-in the transaction set, which changes ledger state by increasing
-sequence numbers and debiting transaction fees. This specification
-changes Stellar so that invalid transactions have no effect on the
-ledger.
+Invalid transactions lack sufficient signatures for proper authorization, have
+sequence numbers that have already been used, or have invalid time bounds.
+Certain edge conditions cause invalid transactions to be included in the
+transaction set produced by consensus. Currently, Stellar executes and fails
+invalid transactions in the transaction set, which changes ledger state by
+increasing sequence numbers and debiting transaction fees. This specification
+changes Stellar so that invalid transactions have no effect on the ledger.
## Motivation
-Allowing invalid--especially unauthorized--transactions to change
-ledger state can lead to vulnerabilities in higher-layer protocols.
-Consider a protocol in which two parties, A and B, execute a
-transaction that contributes funds to an escrow account E and makes E
-into a 2-of-2 multisig account. The parties might be willing to this
-given post-dated pre-signed transactions T1, T2 on E that allow each
-of A and B to recover escrowed funds after a certain delay.
-Unfortunately, if the previous owner of E can cause an invalid
-(unauthorized) transaction to use up T1's sequence number, one of the
-two users will be unable to recover funds.
+Allowing invalid--especially unauthorized--transactions to change ledger state
+can lead to vulnerabilities in higher-layer protocols. Consider a protocol in
+which two parties, A and B, execute a transaction that contributes funds to an
+escrow account E and makes E into a 2-of-2 multisig account. The parties might
+be willing to do this given post-dated pre-signed transactions T1, T2 on E that
+allow each of A and B to recover escrowed funds after a certain delay.
+Unfortunately, if the previous owner of E can cause an invalid (unauthorized)
+transaction to use up T1's sequence number, one of the two users will be unable
+to recover funds.
### Goals Alignment
-The changes described in this document help network security by
-preventing unauthorized transactions from changing account state.
-They also help scalability by facilitating the design of robust
-payment channels.
+The changes described in this document help network security by preventing
+unauthorized transactions from changing account state. They also help
+scalability by facilitating the design of robust payment channels.
## Abstract
-We define three categories of transaction: successful, failed, and
-invalid. Invalid transactions must never change ledger state, even if
-they are included in the transaction set output by consensus. New,
-conservative restrictions rule out current ways of passing invalid
-transactions through consensus. However, if invalid transactions do
-make it through consensus through oversight or subsequently added
-features, they must have no effects on the ledger.
+We define three categories of transaction: successful, failed, and invalid.
+Invalid transactions must never change ledger state, even if they are included
+in the transaction set output by consensus. New, conservative restrictions rule
+out current ways of passing invalid transactions through consensus. However, if
+invalid transactions do make it through consensus through oversight or
+subsequently added features, they must have no effects on the ledger.
## Specification
-We define the following categories of transaction at the time of
-execution:
-
-* A **successful** transaction is one that meets all transaction- and
- operation-level prerequisites at the time of its execution. The
- effects of a successful transaction's operations are applied to the
- ledger. Successful transactions yield a result in which
- `TransactionResult.code` is `txSUCCESS`. This document does not
- change which transactions are considered successful or the effects
- of executing such transactions.
-
-* A **failed** transaction meets all valid transaction-level
- preconditions, including a valid transaction-level source account,
- valid sequence number, valid time bounds, and low signing weight for
- the source account. However, one or more of a failed transaction's
- operations cannot execute---for instance because the `destination`
- of a `PAYMENT` no longer exists. Failed transactions increase the
- sequence number of the transaction-level `sourceAccount`, charge a
- fee to the transaction-level `sourceAccount`, and yield a result
- with `TransactionResult.code` `txFAILED`.
-
-* An **invalid** transaction fails to meet transaction-level
- preconditions at the time of execution, for instance because the
- sequence number is invalid or the signatures are insufficient to
- meet the `sourceAccount`'s low signing threshold. Currently,
- invalid transactions are executed just like failed ones. This
- document changes that behavior so that invalid transactions no
- longer have an effect on the ledger and do not produce a
- `TransactionResult`. If an invalid transaction later becomes valid,
- it may be included again in a subsequent ledger.
-
-A ledger that contains too many invalid transactions will reduce the
-number of operations available to successful transactions and
-negatively impact the ledger. Since the source of an invalid
-transaction does not incur any transaction cost, and since it may be
-difficult in the general case to decide if a transaction is valid at
-the time its fee is charged, we place restrictions on the composition
-of the transaction set in a single ledger. These rules are
-conservative; they may force two valid transactions that could have
-succeeded in the same ledger to be included in different ledgers.
-
-If a transaction set S contains a transaction T, we use S[<=T] to
-denote the subset of S consisting of transactions with the same
-`sourceAccount` as T and a `seqNum` less than or equal to that of T (a
-set that includes T itself). A set S is **admissible**, and hence can
-be a candidate for the consensus transaction set, if all of the
-following hold for each transaction T in S:
-
-* T's `sourceAccount` must exist and must have sufficient funds to pay
- the offered transaction fees for all of S[<=T]. (This restriction
- exists today.)
-
-* T's sequence number will be valid if all other transactions in
- S[<=T] are executed. (This restriction exists today, and among other
- things implies a transaction set may not include a transaction with
- a stale sequence number or two distinct transactions with the same
- source account and sequence number.)
-
-* Any transaction in S other than T that contains a `SET_OPTIONS` or
+We define the following categories of transaction at the time of execution:
+
+- A **successful** transaction is one that meets all transaction- and
+ operation-level prerequisites at the time of its execution. The effects of a
+ successful transaction's operations are applied to the ledger. Successful
+ transactions yield a result in which `TransactionResult.code` is `txSUCCESS`.
+ This document does not change which transactions are considered successful or
+ the effects of executing such transactions.
+
+- A **failed** transaction meets all valid transaction-level preconditions,
+ including a valid transaction-level source account, valid sequence number,
+ valid time bounds, and low signing weight for the source account. However,
+ one or more of a failed transaction's operations cannot execute---for
+ instance because the `destination` of a `PAYMENT` no longer exists. Failed
+ transactions increase the sequence number of the transaction-level
+ `sourceAccount`, charge a fee to the transaction-level `sourceAccount`, and
+ yield a result with `TransactionResult.code` `txFAILED`.
+
+- An **invalid** transaction fails to meet transaction-level preconditions at
+ the time of execution, for instance because the sequence number is invalid or
+ the signatures are insufficient to meet the `sourceAccount`'s low signing
+ threshold. Currently, invalid transactions are executed just like failed
+ ones. This document changes that behavior so that invalid transactions no
+ longer have an effect on the ledger and do not produce a `TransactionResult`.
+ If an invalid transaction later becomes valid, it may be included again in a
+ subsequent ledger.
+
+A ledger that contains too many invalid transactions will reduce the number of
+operations available to successful transactions and negatively impact the
+ledger. Since the source of an invalid transaction does not incur any
+transaction cost, and since it may be difficult in the general case to decide
+if a transaction is valid at the time its fee is charged, we place restrictions
+on the composition of the transaction set in a single ledger. These rules are
+conservative; they may force two valid transactions that could have succeeded
+in the same ledger to be included in different ledgers.
+
+If a transaction set S contains a transaction T, we use S[<=T] to denote the
+subset of S consisting of transactions with the same `sourceAccount` as T and a
+`seqNum` less than or equal to that of T (a set that includes T itself). A set
+S is **admissible**, and hence can be a candidate for the consensus transaction
+set, if all of the following hold for each transaction T in S:
+
+- T's `sourceAccount` must exist and must have sufficient funds to pay the
+ offered transaction fees for all of S[<=T]. (This restriction exists today.)
+
+- T's sequence number will be valid if all other transactions in S[<=T] are
+ executed. (This restriction exists today, and among other things implies a
+ transaction set may not include a transaction with a stale sequence number or
+ two distinct transactions with the same source account and sequence number.)
+
+- Any transaction in S other than T that contains a `SET_OPTIONS` or
`BUMP_SEQUENCE` operation on T's `sourceAccount` must have the same
- `sourceAccount` as T and a greater sequence number than T. This
- guarantees that a `SET_OPTIONS` or `BUMP_SEQUENCE` on T's
- `sourceAccount` cannot be ordered before T. This new restriction
- specified by this document is not enforced today.
-
-In addition to placing these restrictions on transaction sets, we
-restore the behavior prior to `BUMP_SEQUENCE` in which sequence
-numbers are increased at exactly the same time that the fee is
-debited, before any operations are executed. Currently, fees are
-charged up front before any operations from any transactions execute,
-while sequence numbers are increased at the time an operation
-executes. Now that we know a `BUMP_SEQUENCE` cannot be ordered before
-an operation on the source account whose sequence is being bumped, we
-can increase the sequence numbers up front when charging fees.
+ `sourceAccount` as T and a greater sequence number than T. This guarantees
+ that a `SET_OPTIONS` or `BUMP_SEQUENCE` on T's `sourceAccount` cannot be
+ ordered before T. This new restriction specified by this document is not
+ enforced today.
+
+In addition to placing these restrictions on transaction sets, we restore the
+behavior prior to `BUMP_SEQUENCE` in which sequence numbers are increased at
+exactly the same time that the fee is debited, before any operations are
+executed. Currently, fees are charged up front before any operations from any
+transactions execute, while sequence numbers are increased at the time an
+operation executes. Now that we know a `BUMP_SEQUENCE` cannot be ordered before
+an operation on the source account whose sequence is being bumped, we can
+increase the sequence numbers up front when charging fees.
## Design Rationale
-This design prevents people who are no longer authorized signers an on
-account from affecting the account state. However, it does so in a
-way that avoids the ability to clog the ledger with invalid
-transactions.
+This design prevents people who are no longer authorized signers on an account
+from affecting the account state. However, it does so in a way that avoids the
+ability to clog the ledger with invalid transactions.
## Backwards Incompatibilities
-There are some minor incompatibilities that should not impact users.
-First, certain invalid transactions will no longer execute as failed
-transactions because they will not execute at all. However, in all
-such cases, there was no guarantee the transactions would execute
-anyway, because they could only execute when they happened to land in
-the same ledger as another transaction with a `SET_OPTIONS` or
-`BUMP_SEQUENCE` operation. Hence the most likely effect of this
-change is to thwart attacks on vulnerable higher-layer protocols.
+There are some minor incompatibilities that should not impact users. First,
+certain invalid transactions will no longer execute as failed transactions
+because they will not execute at all. However, in all such cases, there was no
+guarantee the transactions would execute anyway, because they could only
+execute when they happened to land in the same ledger as another transaction
+with a `SET_OPTIONS` or `BUMP_SEQUENCE` operation. Hence the most likely effect
+of this change is to thwart attacks on vulnerable higher-layer protocols.
-Second, certain transactions can no longer execute in the same
-ledger. Again, there is never a guarantee that an operation can
-execute in a particular ledger, so applications could not have relied
-on this happening anyway.
+Second, certain transactions can no longer execute in the same ledger. Again,
+there is never a guarantee that an operation can execute in a particular
+ledger, so applications could not have relied on this happening anyway.
-Finally, sequence number increases will appear in
-`TransactionMetaV1.txChanges` instead of
-`TransactionMetaV1.operations[0]`. This should be more intuitive.
+Finally, sequence number increases will appear in `TransactionMetaV1.txChanges`
+instead of `TransactionMetaV1.operations[0]`. This should be more intuitive.
## Security Concerns
-The changes outlined in this document should for the most part improve
-security by providing more consistent authorization of changes to
-accounts. The one potential risk is that attackers could clog the
-network with invalid transactions without paying nearly the
-corresponding amount of transaction fees. That risk is mitigated by
-the restrictions on transaction sets, but is something that must be
-kept in mind when future CAPs add additional operations.
-
-Any future operations or other transaction features added to the
-network that can render transactions invalid may need to revise the
-transaction set restrictions to prevent invalid transactions from
-being admissible.
+The changes outlined in this document should for the most part improve security
+by providing more consistent authorization of changes to accounts. The one
+potential risk is that attackers could clog the network with invalid
+transactions without paying nearly the corresponding amount of transaction
+fees. That risk is mitigated by the restrictions on transaction sets, but is
+something that must be kept in mind when future CAPs add additional operations.
+
+Any future operations or other transaction features added to the network that
+can render transactions invalid may need to revise the transaction set
+restrictions to prevent invalid transactions from being admissible.
## Test Cases
diff --git a/core/cap-0023.md b/core/cap-0023.md
index 835161836..bf77dd1f0 100644
--- a/core/cap-0023.md
+++ b/core/cap-0023.md
@@ -12,6 +12,7 @@ Protocol version: 14
```
## Simple Summary
+
Payments can fail depending on the state of the destination account. This
proposal introduces new operations that separate sending a payment from
receiving the payment. Then the success of sending depends only on the state of
@@ -19,15 +20,17 @@ the sending account and success of receiving depends only on the state of the
receiving account.
## Motivation
+
This proposal seeks to solve the following problem: it should be easy to send a
payment to an account that is not necessarily prepared to receive the payment.
There are several manifestations of this problem, the two most important being
1. it should be easy for protocols (like an implementation of payment channels)
-to pay out to participants, and
+ to pay out to participants, and
2. it should be easy for issuers to issue assets non-interactively.
### Goals Alignment
+
This proposal is aligned with several Stellar Network Goals, among them:
- The Stellar Network should facilitate simplicity and interoperability with
@@ -35,26 +38,30 @@ This proposal is aligned with several Stellar Network Goals, among them:
- The Stellar Network should enable cross-border payments, i.e. payments via
exchange of assets, throughout the globe, enabling users to make payments
between assets in a manner that is fast, cheap, and highly usable.
- - In support of this, the Stellar Network should enable asset issuance, but
- as a means of enabling cross-border payments.
+ - In support of this, the Stellar Network should enable asset issuance, but
+ as a means of enabling cross-border payments.
## Abstract
+
We introduce `ClaimableBalanceEntry` as a new type of `LedgerEntry` which
represents the transfer of ownership of some amount of an asset. The operations
`CreateClaimableBalanceOp` and `ClaimClaimableBalanceOp` allow the creation and
consumption of claimable balance entries, which permits temporal separation of
initiating and reciving a payment. Existing proposals, such as those for
deterministic accounts, can provide a similar mechanism but are not able to
-handle authorization restricted assets as easily. A specific and simple protocol
-that will be facilitated is the asset issuance protocol that issues an asset to
-a given account, regardless of whether it exists or is prepared to receive the
-funds.
+handle authorization restricted assets as easily. A specific and simple
+protocol that will be facilitated is the asset issuance protocol that issues an
+asset to a given account, regardless of whether it exists or is prepared to
+receive the funds.
## Specification
### XDR
-First, we introduce `ClaimableBalanceEntry`(Note that the XDR was updated in https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md#claimablebalanceentry) and the corresponding changes for
-`LedgerEntryType` and `LedgerEntry`.
+
+First, we introduce `ClaimableBalanceEntry`(Note that the XDR was updated in
+https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md#claimablebalanceentry)
+and the corresponding changes for `LedgerEntryType` and `LedgerEntry`.
+
```c++
enum LedgerEntryType
{
@@ -178,8 +185,10 @@ case CLAIMABLE_BALANCE:
Second, we introduce the new operations `CreateClaimableBalanceOp` and
`ClaimClaimableBalanceOp` as well as the corresponding changes to
-`OperationType` and `Operation`. We also introduce the type `OperationID`
-to represent the hash preimage for `ClaimableBalanceID`, along with a new `EnvelopeType`.
+`OperationType` and `Operation`. We also introduce the type `OperationID` to
+represent the hash preimage for `ClaimableBalanceID`, along with a new
+`EnvelopeType`.
+
```c++
enum OperationType
{
@@ -240,6 +249,7 @@ enum EnvelopeType
Third, we introduce the result types `CreateClaimableBalanceResult` and
`ClaimClaimableBalanceResult` as well as the corresponding changes to
`OperationResult`.
+
```c++
enum CreateClaimableBalanceResultCode
{
@@ -299,6 +309,7 @@ struct OperationResult
### Semantics
#### CreateClaimableBalanceOp
+
A `ClaimableBalanceEntry` can only be created by the `CreateClaimableBalanceOp`
operation. `CreateClaimableBalanceOp` is invalid with
`CREATE_CLAIMABLE_BALANCE_MALFORMED` if
@@ -310,39 +321,40 @@ operation. `CreateClaimableBalanceOp` is invalid with
- `claimants[i].predicate` has depth greater than 4 (for any `i`)
- `claimants[i].predicate` contains a predicate of type `CLAIM_PREDICATE_AND`
with `andPredicates.size() != 2`, `CLAIM_PREDICATE_OR` with
- `orPredicates.size() != 2`, or `CLAIM_PREDICATE_NOT` with a null `notPredicate` (for any `i`)
+ `orPredicates.size() != 2`, or `CLAIM_PREDICATE_NOT` with a null
+ `notPredicate` (for any `i`)
- `claimants[i].predicate` contains a predicate of type
`CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME` or
- `CLAIM_PREDICATE_BEFORE_RELATIVE_TIME` with
- `absBefore < 0` or `relBefore < 0` (for any `i`)
+ `CLAIM_PREDICATE_BEFORE_RELATIVE_TIME` with `absBefore < 0` or
+ `relBefore < 0` (for any `i`)
The behavior of `CreateClaimableBalanceOp` is as follows:
1. Fail with `CREATE_CLAIMABLE_BALANCE_LOW_RESERVE` if the `sourceAccount` does
not have at least `claimants.size() * baseReserve` available balance of
native asset
-2. Deduct `claimants.size() * baseReserve` of native asset from
- `sourceAccount`
-3. Fail with `CREATE_CLAIMABLE_BALANCE_NO_TRUST` if the `sourceAccount` does not
- have a trust line for `asset`
+2. Deduct `claimants.size() * baseReserve` of native asset from `sourceAccount`
+3. Fail with `CREATE_CLAIMABLE_BALANCE_NO_TRUST` if the `sourceAccount` does
+ not have a trust line for `asset`
4. Fail with `CREATE_CLAIMABLE_BALANCE_NOT_AUTHORIZED` if the `sourceAccount`
trust line for `asset` does not have `AUTHORIZED_FLAG` set
5. Fail with `CREATE_CLAIMABLE_BALANCE_UNDERFUNDED` if the `sourceAccount` does
not have at least `amount` available balance of `asset`
6. Deduct `amount` of `asset` from `sourceAccount`
7. Create a claimable balance entry with the following properties:
- - `balanceID` of type `CLAIMABLE_BALANCE_ID_TYPE_V0`.[^id-arithmatic]
- - `createdBy = sourceAccount` (of the transaction, not the operation)
- - `claimants` as specified, with the exception that
- - `CLAIM_PREDICATE_BEFORE_RELATIVE_TIME` will be converted to
- `CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME` by adding `relBefore` to
- the `closeTime` in the `LedgerHeader`. If this addition exceeds
- `INT64_MAX` then use `INT64_MAX`.
- - `asset` as specified in the operation
- - `amount` as specified in the operation
- - `reserve` equal to `claimants.size() * baseReserve`
-
-[^id-arithmatic]: ```javascript
+ - `balanceID` of type `CLAIMABLE_BALANCE_ID_TYPE_V0`.[^id-arithmatic]
+ - `createdBy = sourceAccount` (of the transaction, not the operation)
+ - `claimants` as specified, with the exception that
+ - `CLAIM_PREDICATE_BEFORE_RELATIVE_TIME` will be converted to
+ `CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME` by adding `relBefore` to the
+ `closeTime` in the `LedgerHeader`. If this addition exceeds `INT64_MAX`
+ then use `INT64_MAX`.
+ - `asset` as specified in the operation
+ - `amount` as specified in the operation
+ - `reserve` equal to `claimants.size() * baseReserve`
+
+[^id-arithmatic]:
+ ```javascript
HashIDPreimage = (
sourceAccount;
seqNum;
@@ -352,18 +364,21 @@ The behavior of `CreateClaimableBalanceOp` is as follows:
balanceID.v0() = OperationID // Hash
clientDisplayID = hex(HashIDPreimage)
```
+
- `HashIDPreimage`: a switch within the new `type` `ENVELOPE_TYPE_OP_ID`
- `OperationID`: hash of `ENVELOPE_TYPE_OP_ID` precomputation data
- `sourceAccount`: unmuxed public key of the transaction's source
- `seqNum`: the transaction source account's sequence number
- `opNum`: position index of this operation in the transaction
-8. Succeed with `CREATE_CLAIMABLE_BALANCE_SUCCESS` and the `balanceID` from the previous step.
+8. Succeed with `CREATE_CLAIMABLE_BALANCE_SUCCESS` and the `balanceID` from the
+ previous step.
`CreateClaimableBalanceOp` requires medium threshold because it can be used to
send funds.
#### ClaimClaimableBalanceOp
+
A `ClaimableBalanceEntry` can only be deleted by the `ClaimClaimableBalanceOp`
operation. `ClaimClaimableBalanceOp` cannot be invalid.
@@ -371,22 +386,23 @@ The behavior of `ClaimClaimableBalanceOp` is as follows:
1. Fail with `CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST` if there is no
`ClaimableBalanceEntry` matching `balanceID`.
-2. Fail with `CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM` if there is no `i` such that
- `claimants[i].destination = sourceAccount` or if `claimants[i].predicate`
- is not satisfied
+2. Fail with `CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM` if there is no `i` such
+ that `claimants[i].destination = sourceAccount` or if
+ `claimants[i].predicate` is not satisfied
3. Skip to step 7 if `createdBy` does not exist
4. Skip to step 7 if `createdBy` does not have at least `reserve` available
limit of native asset
5. Add `reserve` of native asset to `createdBy`
6. Skip to step 9
-7. Fail with `CLAIM_CLAIMABLE_BALANCE_LINE_FULL` if the `sourceAccount` does not
- have at least `reserve` available limit of native asset
+7. Fail with `CLAIM_CLAIMABLE_BALANCE_LINE_FULL` if the `sourceAccount` does
+ not have at least `reserve` available limit of native asset
8. Add `reserve` of native asset to `sourceAccount`
-9. Fail with `CLAIM_CLAIMABLE_BALANCE_NO_TRUST` if `asset` is not of type
- `ASSET_TYPE_NATIVE` and the `sourceAccount` trust line for `asset` does not exist
-10. Fail with `CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED` if `asset` is not of type
+9. Fail with `CLAIM_CLAIMABLE_BALANCE_NO_TRUST` if `asset` is not of type
`ASSET_TYPE_NATIVE` and the `sourceAccount` trust line for `asset` does not
- have the `AUTHORIZED_FLAG` flag set
+ exist
+10. Fail with `CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED` if `asset` is not of
+ type `ASSET_TYPE_NATIVE` and the `sourceAccount` trust line for `asset`
+ does not have the `AUTHORIZED_FLAG` flag set
11. Fail with `CLAIM_CLAIMABLE_BALANCE_LINE_FULL` if the `sourceAccount` does
not have at least `amount` available limit of `asset`
12. Add `amount` of `asset` to the `sourceAccount`
@@ -399,6 +415,7 @@ transfer funds from a `ClaimableBalanceEntry` to a trust line.
## Design Rationale
### ClaimableBalanceEntry is not a sub-entry
+
Each `ClaimableBalanceEntry` exists as an independent entity on the ledger. It
is clear that a `ClaimableBalanceEntry` cannot be a sub-entry of any of its
`claimants`, because it is a security risk for accounts to be able to add
@@ -408,10 +425,11 @@ them? There are two main benefits of this design:
1. Sending accounts are not limited in the number of claimable balance entries
they can create
-2. Sending accounts can be merged even if they created claimable balance entries
- that have not yet been claimed
+2. Sending accounts can be merged even if they created claimable balance
+ entries that have not yet been claimed
### ClaimableBalanceEntry claimants are accounts
+
For each `ClaimableBalanceEntry`, `claimants` contains a finite and immutable
list of accounts that could potentially claim the `ClaimableBalanceEntry`. Even
if the conditions are satisfiable (which is not guaranteed), it is still
@@ -428,9 +446,9 @@ would fail. This would allow the appropriate party to claim the
`ClaimableBalanceEntry` into any account that they control. But this would also
make it considerably easier to circumvent authorization restrictions on assets.
For instance, an authorized account could create a `ClaimableBalanceEntry` with
-a recipient public key whose private key is known only to some other party. That
-party would then control the funds in the `ClaimableBalanceEntry` and could
-claim them into any account that is authorized. A similar scheme could be
+a recipient public key whose private key is known only to some other party.
+That party would then control the funds in the `ClaimableBalanceEntry` and
+could claim them into any account that is authorized. A similar scheme could be
executed today by changing the signers on an account, but this would only be
possible once per authorized account and cannot separate out a fraction of the
funds. In summary, an approach that could allow `ClaimableBalanceEntry` to be
@@ -438,35 +456,42 @@ claimable into any account would significantly weaken the strength of
authorization restrictions.
### Should it be possible to increase the amount of a ClaimableBalanceEntry?
+
One issue which has been discussed during the development of this proposal is
the absence of a mechanism to increase the `amount` of a
`ClaimableBalanceEntry`. The specific scenario which would warrant this
-functionality is when a single account sends many identical payments to a single
-account that is not prepared to receive them and does not claim any of the
-payments. However, this case is sufficiently specific that we recommend pursuing
-it in a separate proposal once this proposal has been implemented. Delaying this
-feature presents minimal additional difficulty because `ClaimableBalanceEntry`
-has an extension point.
+functionality is when a single account sends many identical payments to a
+single account that is not prepared to receive them and does not claim any of
+the payments. However, this case is sufficiently specific that we recommend
+pursuing it in a separate proposal once this proposal has been implemented.
+Delaying this feature presents minimal additional difficulty because
+`ClaimableBalanceEntry` has an extension point.
-This issue has also been slightly mitigated relative to earlier versions of this
-proposal because `ClaimableBalanceEntry` now returns the reserve to the sending
-account, whenever possible.
+This issue has also been slightly mitigated relative to earlier versions of
+this proposal because `ClaimableBalanceEntry` now returns the reserve to the
+sending account, whenever possible.
### Memo
+
Everything proposed in this document takes the same stance as existing features
-of the protocol with regard to memo: memo is a property of a transaction, not of
-an operation or a ledger entry.
+of the protocol with regard to memo: memo is a property of a transaction, not
+of an operation or a ledger entry.
## Backwards Incompatibilities
+
All downstream systems will need updated XDR in order to recognize the new
operations and ledger entries.
## Security Concerns
-This proposal will slightly reduce the efficacy of base reserve changes, because
-a `ClaimableBalanceEntry` that has insufficient reserve is still usable.
+
+This proposal will slightly reduce the efficacy of base reserve changes,
+because a `ClaimableBalanceEntry` that has insufficient reserve is still
+usable.
## Test Cases
+
None yet.
## Implementation
+
https://github.com/stellar/stellar-core/pull/2591
diff --git a/core/cap-0024.md b/core/cap-0024.md
index 79f6e803c..68fbac675 100644
--- a/core/cap-0024.md
+++ b/core/cap-0024.md
@@ -11,50 +11,56 @@ Protocol version: 12
```
## Simple Summary
+
Right now `PathPaymentOp` lets you specify how much the recipient will receive
while the amount sent can vary. There are use cases where you really want to
specify the amount sent while the amount received can vary.
## Motivation
+
The motivation to add `PathPaymentStrictSendOp` is analogous to that for adding
-`ManageBuyOfferOp`: many financial institutions have an obligation to faithfully
-carry out customer orders. As a concrete example, suppose that an individual
-whose native currency is X is selling a house. A potential buyer whose native
-currency is Y approaches the seller. They agree to a contract where the buyer
-must deposit a certain amount of currency X with an escrow agent chosen by the
-seller. The escrowed amount will contribute to the purchase price if the
-transaction is completed, will be returned to the buyer (in currency Y) in the
-event that seller defaults, and will be credited to the seller as damages in the
-event that buyer defaults. The buyer can easily use `PathPaymentOp` to deposit
-the agreed amount of currency X with the escrow agent. If the escrow agent needs
-to transfer the escrowed funds to the seller, this can be done with `PaymentOp`.
-But if the escrow agent needs to transfer the escrowed funds to the buyer (in
-currency Y) then `PathPaymentOp` will not be effective because the escrow agent
-would be required to guess the `destAmount` in order to send the entire escrowed
-amount. In general, there is no value for the `destAmount` that guarantees that
-the entire `maxSend` will be sent, so the escrow agent will not be able to meet
-his obligations.
+`ManageBuyOfferOp`: many financial institutions have an obligation to
+faithfully carry out customer orders. As a concrete example, suppose that an
+individual whose native currency is X is selling a house. A potential buyer
+whose native currency is Y approaches the seller. They agree to a contract
+where the buyer must deposit a certain amount of currency X with an escrow
+agent chosen by the seller. The escrowed amount will contribute to the purchase
+price if the transaction is completed, will be returned to the buyer (in
+currency Y) in the event that seller defaults, and will be credited to the
+seller as damages in the event that buyer defaults. The buyer can easily use
+`PathPaymentOp` to deposit the agreed amount of currency X with the escrow
+agent. If the escrow agent needs to transfer the escrowed funds to the seller,
+this can be done with `PaymentOp`. But if the escrow agent needs to transfer
+the escrowed funds to the buyer (in currency Y) then `PathPaymentOp` will not
+be effective because the escrow agent would be required to guess the
+`destAmount` in order to send the entire escrowed amount. In general, there is
+no value for the `destAmount` that guarantees that the entire `maxSend` will be
+sent, so the escrow agent will not be able to meet his obligations.
### Goals Alignment
+
This proposal is aligned with the following Stellar Network Goal: The Stellar
Network should enable cross-border payments, i.e. payments via exchange of
-assets, throughout the globe, enabling users to make payments between assets
-in a manner that is fast, cheap, and highly usable.
+assets, throughout the globe, enabling users to make payments between assets in
+a manner that is fast, cheap, and highly usable.
## Abstract
+
This proposal introduces a new operation `PathPaymentStrictSendOp` that allows
-a form of path payment with a strict equality constraint on the send amount
-and an inequality constraint on the destination amount. The existing
-`PathPayment` operation is renamed to `PathPaymentStrictReceiveOp` in order to
-reflect that it allows a form of path payment with a strict equality constraint
-on the destination amount and an inequality constraint on the send amount.
+a form of path payment with a strict equality constraint on the send amount and
+an inequality constraint on the destination amount. The existing `PathPayment`
+operation is renamed to `PathPaymentStrictReceiveOp` in order to reflect that
+it allows a form of path payment with a strict equality constraint on the
+destination amount and an inequality constraint on the send amount.
## Specification
### XDR
+
We first introduce the XDR for operations `PathPaymentStrictReceiveOp` and
`PathPaymentStrictSendOp`, as well as the corresponding changes to
`OperationType` and `Operation`.
+
```c++
enum OperationType
{
@@ -125,6 +131,7 @@ struct Operation
We next introduce the result types `PathPaymentStrictReceiveResult` and
`PathPaymentStrictSendResult`, as well as the corresponding changes to
`OperationResult`.
+
```c++
enum PathPaymentStrictReceiveResultCode
{
@@ -213,78 +220,87 @@ default:
```
### Semantics
+
`PathPaymentStrictSendOp` returns
- `PATH_PAYMENT_STRICT_SEND_NO_DESTINATION` if issuer checks are not bypassed
-and `destAccount` does not exist
-- `PATH_PAYMENT_STRICT_SEND_NO_ISSUER` if issuer checks are not bypassed and the
-issuer of `sendAsset` does not exist
+ and `destAccount` does not exist
+- `PATH_PAYMENT_STRICT_SEND_NO_ISSUER` if issuer checks are not bypassed and
+ the issuer of `sendAsset` does not exist
- `PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST` if `sendAsset` is not native and
-`sourceAccount` does not own a trust line for `sendAsset`
-- `PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED` if `sendAsset` is not native and
-the trust line for `sendAsset` owned by `sourceAccount` is not authorized
-- `PATH_PAYMENT_STRICT_SEND_UNDERFUNDED` if `sourceAccount` does not have sufficient
-available balance of `sendAsset` after accounting for selling liabilities and
-base reserve (if `sendAsset` is native)
-- For each `(S, R)` in `(sendAsset, path[0]), (path[0], path[1]), ...,
-(path[n-1], path[n]), (path[n], destAsset)` with `S != R`
- - `PATH_PAYMENT_STRICT_SEND_NO_ISSUER` if `R` is not native and the issuer
-of `R` does not exist
- - `PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF` if an offer owned by
-`sourceAccount` was encountered between `S` and `R` before sending the required
-amount of `S`
- - `PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS` if less than the required amount
-of `S` was sent before exhausting all offers between `S` and `R`
-- `PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN` if the amount sent to the destination
-does not exceed `destMin`
-- `PATH_PAYMENT_STRICT_SEND_NO_ISSUER` if issuer checks are not bypassed and the
-issuer of `destAsset` does not exist
-- `PATH_PAYMENT_STRICT_SEND_NO_TRUST` if `destAsset` is not native and `destAccount`
-does not own a trust line for `destAsset`
-- `PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED` if `destAsset` is not native and the
-trust line for `destAsset` owned by `destAccount` is not authorized
-- `PATH_PAYMENT_STRICT_SEND_LINE_FULL` if `destAccount` does not have sufficient
-available limit of `destAsset` after accounting for buying liabilities
+ `sourceAccount` does not own a trust line for `sendAsset`
+- `PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED` if `sendAsset` is not native
+ and the trust line for `sendAsset` owned by `sourceAccount` is not authorized
+- `PATH_PAYMENT_STRICT_SEND_UNDERFUNDED` if `sourceAccount` does not have
+ sufficient available balance of `sendAsset` after accounting for selling
+ liabilities and base reserve (if `sendAsset` is native)
+- For each `(S, R)` in
+  `(sendAsset, path[0]), (path[0], path[1]), ..., (path[n-1], path[n]), (path[n], destAsset)`
+  with `S != R`
+  - `PATH_PAYMENT_STRICT_SEND_NO_ISSUER` if `R` is not native and the issuer
+    of `R` does not exist
+  - `PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF` if an offer owned by
+    `sourceAccount` was encountered between `S` and `R` before sending the
+    required amount of `S`
+  - `PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS` if less than the required amount
+    of `S` was sent before exhausting all offers between `S` and `R`
+- `PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN` if the amount sent to the
+ destination does not exceed `destMin`
+- `PATH_PAYMENT_STRICT_SEND_NO_ISSUER` if issuer checks are not bypassed and
+ the issuer of `destAsset` does not exist
+- `PATH_PAYMENT_STRICT_SEND_NO_TRUST` if `destAsset` is not native and
+ `destAccount` does not own a trust line for `destAsset`
+- `PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED` if `destAsset` is not native and
+ the trust line for `destAsset` owned by `destAccount` is not authorized
+- `PATH_PAYMENT_STRICT_SEND_LINE_FULL` if `destAccount` does not have
+ sufficient available limit of `destAsset` after accounting for buying
+ liabilities
- `PATH_PAYMENT_STRICT_SEND_SUCCESS` otherwise
-If `PathPaymentStrictSendOp` succeeds then the source account is debited exactly
-`sendAmount` of `sendAsset` and the `destAccount` is credited at least `destMin`
-of `destAsset`. In order to achieve this goal, it was required to introduce a
-new rounding mode (see CAP-0004 for context). We provide an informal description
-of what occurs and why; for a detailed proof please refer to the implementation.
-This rounding mode may only differ from the typical rounding behavior if the offer
-is larger than the remnants of the conversion (this can only occur for the last
-offer crossed in each step of the path). In this case, the remaining quantity of
-`S` will be converted into the maximum amount of `R` that is possible (given
-limits on the conversion and the offer) such that the price remains favorable to
-the owner of the offer. This is the correct behavior because
-
-- The entire amount available to be sent is in fact sent if it is possible to do
-so
+If `PathPaymentStrictSendOp` succeeds then the source account is debited
+exactly `sendAmount` of `sendAsset` and the `destAccount` is credited at least
+`destMin` of `destAsset`. In order to achieve this goal, it was required to
+introduce a new rounding mode (see CAP-0004 for context). We provide an
+informal description of what occurs and why; for a detailed proof please refer
+to the implementation. This rounding mode may only differ from the typical
+rounding behavior if the offer is larger than the remnants of the conversion
+(this can only occur for the last offer crossed in each step of the path). In
+this case, the remaining quantity of `S` will be converted into the maximum
+amount of `R` that is possible (given limits on the conversion and the offer)
+such that the price remains favorable to the owner of the offer. This is the
+correct behavior because
+
+- The entire amount available to be sent is in fact sent if it is possible to
+ do so
- The received amount is the maximum possible while continuing to meet the
-requirement that it must favor the owner of the offer
+ requirement that it must favor the owner of the offer
## Design Rationale
-Every design decision concerning `PathPaymentStrictSendOp` was decided to make the
-operation "dual" to `PathPaymentStrictReceiveOp` in as many ways as possible. With
-that context in mind, the central design decision for `PathPaymentStrictSendOp`
-was enforcing the requirement that it must send exactly the specified `sendAmount`.
-This decision has the following advantages, analogous to the advantages of having
-`PathPaymentStrictReceiveOp` receive exactly the specified `destAmount`
+
+Every design decision concerning `PathPaymentStrictSendOp` was decided to make
+the operation "dual" to `PathPaymentStrictReceiveOp` in as many ways as
+possible. With that context in mind, the central design decision for
+`PathPaymentStrictSendOp` was enforcing the requirement that it must send
+exactly the specified `sendAmount`. This decision has the following advantages,
+analogous to the advantages of having `PathPaymentStrictReceiveOp` receive
+exactly the specified `destAmount`
- The semantics of the operation are simple to explain
- The state of the `sourceAccount` and `destAccount` is predictable after the
-operation executes
+ operation executes
## Backwards Incompatibilities
+
All downstream systems will need updated XDR in order to recognize the new
operation.
## Security Concerns
+
None yet.
## Test Cases
+
None yet.
## Implementation
+
None yet.
diff --git a/core/cap-0025.md b/core/cap-0025.md
index 04c1e9dc0..9543fa8f3 100644
--- a/core/cap-0025.md
+++ b/core/cap-0025.md
@@ -12,114 +12,133 @@ Protocol version: 12
## Simple Summary
-This proposal makes simplifications to the bucket list data structure in stellar-core, such that the
-new bucket merge logic results in different buckets, than ones produced by previous versions.
+This proposal makes simplifications to the bucket list data structure in
+stellar-core, such that the new bucket merge logic results in different
+buckets than the ones produced by previous versions.
-The main simplification is removal of `shadows`, which are older versions of the bucket list.
-Shadows are used to avoid storing all updates to the same ledger entry, and keeping only the most
-recent one instead.
+The main simplification is removal of `shadows`, which are older versions of
+the bucket list. Shadows are used to avoid storing all updates to the same
+ledger entry, and keeping only the most recent one instead.
## Abstract
-Bucket list is a log-structured merge (LSM) tree used to store the complete state of stellar-core.
-As new entries are added, bucket list merges its existing buckets to produce new buckets, which
-incorporate the new data. Additionally, bucket list stores its previous versions, called shadows.
-The purpose of shadows is to handle high volume of changes to the same ledger entries (i.e., entries
-of `LIVEENTRY` type). That is, if a `LIVEENTRY` entry exists at a higher (younger) level, another
-`LIVEENTRY` at a lower (older) level is elided during a merge. This proposal aims to remove shadows,
-by showing that they:
+Bucket list is a log-structured merge (LSM) tree used to store the complete
+state of stellar-core. As new entries are added, bucket list merges its
+existing buckets to produce new buckets, which incorporate the new data.
+Additionally, bucket list stores its previous versions, called shadows. The
+purpose of shadows is to handle high volume of changes to the same ledger
+entries (i.e., entries of `LIVEENTRY` type). That is, if a `LIVEENTRY` entry
+exists at a higher (younger) level, another `LIVEENTRY` at a lower (older)
+level is elided during a merge. This proposal aims to remove shadows, by
+showing that they:
1. Have a high storage cost.
2. Significantly increase bucket merge latency.
-3. Are only valuable for a niche range of scenarios. Additionally, their usefulness degrades
-at lower levels.
+3. Are only valuable for a niche range of scenarios. Additionally, their
+ usefulness degrades at lower levels.
-The changes proposed will not affect bucket semantics. It will merely change how buckets are merged,
-producing different outputs compared to previous protocol versions.
+The changes proposed will not affect bucket semantics. It will merely change
+how buckets are merged, producing different outputs compared to previous
+protocol versions.
## Motivation
-This change was motivated by the performance analysis done on shadows. In the current protocol, for
-level i, shadows consist of all levels above level i - 1. Every level, starting with level 2, stores
-such a list of shadows. During bucket merge, aside from reading the contents of the two buckets
-being merged, all shadows are iterated through to check whether an elision of a particular ledger
-entry is needed. This slows down the merge latency significantly. The assessment of current public
-network traffic shows that merge times for lowest-level buckets complete approximately 3 times
-faster without shadows than with shadows.
-
-Additionally, shadows create a significant storage burden. On one hand, they prevent constantly
-churning ledger entries from propagating all the way down to lowest levels, avoiding redundant
-entries. On the other hand, the data from the public network suggests that the overhead of such
-spilling is not particularly significant. For example, the resulting buckets without shadows were at
-most twice bigger than ones that used shadows. However, the total size of shadows stored for lower
-levels still outweighed the overhead of larger resulting buckets.
-
-Lastly, there are a few very specific cases when a particular level can be fully shadowed. That is,
-for shadows to be the most useful at level i, it must be assumed that nothing above level i is
-shadowed. This also means that storing shadows for all previous levels is wasteful, since nothing is
-shadowed. At lower levels, shadows are less useful, since the bucket list incurs the overhead of
-unnecessary storage of shadows at higher levels. Moreover, the argument is based on an assumption
-that all the network traffic follows a specific, unlikely pattern: it updates a certain number of
-same ledger entries continuously.
+This change was motivated by the performance analysis done on shadows. In the
+current protocol, for level i, shadows consist of all levels above level i - 1.
+Every level, starting with level 2, stores such a list of shadows. During
+bucket merge, aside from reading the contents of the two buckets being merged,
+all shadows are iterated through to check whether an elision of a particular
+ledger entry is needed. This slows down the merge latency significantly. The
+assessment of current public network traffic shows that merge times for
+lowest-level buckets complete approximately 3 times faster without shadows than
+with shadows.
+
+Additionally, shadows create a significant storage burden. On one hand, they
+prevent constantly churning ledger entries from propagating all the way down to
+lowest levels, avoiding redundant entries. On the other hand, the data from the
+public network suggests that the overhead of such spilling is not particularly
+significant. For example, the resulting buckets without shadows were at most
+twice bigger than ones that used shadows. However, the total size of shadows
+stored for lower levels still outweighed the overhead of larger resulting
+buckets.
+
+Lastly, there are a few very specific cases when a particular level can be
+fully shadowed. That is, for shadows to be the most useful at level i, it must
+be assumed that nothing above level i is shadowed. This also means that storing
+shadows for all previous levels is wasteful, since nothing is shadowed. At
+lower levels, shadows are less useful, since the bucket list incurs the
+overhead of unnecessary storage of shadows at higher levels. Moreover, the
+argument is based on an assumption that all the network traffic follows a
+specific, unlikely pattern: it updates a certain number of same ledger entries
+continuously.
## Specification
-The ledger protocol number will be increased, to version 12. This is a breaking change. The bucket
-list will produce buckets differing from buckets of previous protocols. This difference will affect
-the bucket list hash, which is used for consensus.
+The ledger protocol number will be increased, to version 12. This is a breaking
+change. The bucket list will produce buckets differing from buckets of previous
+protocols. This difference will affect the bucket list hash, which is used for
+consensus.
### Bucket Content
-Bucket semantics remain unchanged. The only difference is potentially more frequent
-occurrences of `LIVEENTRY` types of bucket entries across multiple buckets for the same ledger entry.
+Bucket semantics remain unchanged. The only difference is potentially more
+frequent occurrences of `LIVEENTRY` types of bucket entries across multiple
+buckets for the same ledger entry.
### Merge Algorithm Changes
-Under protocol 12, new style bucket merges will not consider shadows, so less bucket entries will be
-elided. In order to perform a new style merge, nodes will compare versions of the inputs.
-Specifically, a new style merge will be performed if the input snap is of ledger version 12 or
-higher. This implies that nodes will keep performing old style merges with shadows until input snaps
+Under protocol 12, new style bucket merges will not consider shadows, so fewer
+bucket entries will be elided. In order to perform a new style merge, nodes
+will compare versions of the inputs. Specifically, a new style merge will be
+performed if the input snap is of ledger version 12 or higher. This implies
+that nodes will keep performing old style merges with shadows until input snaps
are of the right version.
### Protocol Upgrade Changes
-To avoid delays while adding the new data to the bucket list, old buckets are merged in the
-background as early as possible. Some of those merges are completed long before they are needed.
-After the protocol 12 upgrade, all existing in-progress or completed (but not yet needed) merges
-will remain relevant. The implementation will use these merges at appropriate ledgers, then,
-depending on the input versions for each level, will start old style or new style merges.
-Eventually, all bucket list levels will be performing new style merges. Because new style merges are
-started gradually, letting merges of older versions finish, nodes may upgrade at any ledger without
-delays.
+To avoid delays while adding the new data to the bucket list, old buckets are
+merged in the background as early as possible. Some of those merges are
+completed long before they are needed. After the protocol 12 upgrade, all
+existing in-progress or completed (but not yet needed) merges will remain
+relevant. The implementation will use these merges at appropriate ledgers,
+then, depending on the input versions for each level, will start old style or
+new style merges. Eventually, all bucket list levels will be performing new
+style merges. Because new style merges are started gradually, letting merges of
+older versions finish, nodes may upgrade at any ledger without delays.
### History Archive Changes
-Starting protocol 12, nodes will not publish inputs or outputs to new style merges. With shadows
-removed, inputs to bucket merges are completely reconstructible from the bucket list itself. Note,
-inputs to merges remain stable and derivable from the current bucket list until the future bucket is
-promoted into the bucket list. Due to this property, it is possible to re-start merges at any ledger
-between the initial merge trigger and the completed merge promotion.
+Starting protocol 12, nodes will not publish inputs or outputs to new style
+merges. With shadows removed, inputs to bucket merges are completely
+reconstructible from the bucket list itself. Note, inputs to merges remain
+stable and derivable from the current bucket list until the future bucket is
+promoted into the bucket list. Due to this property, it is possible to re-start
+merges at any ledger between the initial merge trigger and the completed merge
+promotion.
## Design Rationale
-The initial bucket list design did not have a comprehensive analysis of shadows,
-showing that it is not a general optimization, but rather an improvement to a very specific ledger
-traffic scenario.
+The initial bucket list design did not have a comprehensive analysis of
+shadows, showing that it is not a general optimization, but rather an
+improvement to a very specific ledger traffic scenario.
-Additionally, other known implementations of LSM trees suggest that per-level compaction is
-sufficient. For example, LevelDB gives an overview of its compactions:
+Additionally, other known implementations of LSM trees suggest that per-level
+compaction is sufficient. For example, LevelDB gives an overview of its
+compactions:
https://github.com/google/leveldb/blob/master/doc/impl.md#compactions
## Backwards Incompatibilities
-This is a breaking change, even though there are no semantic changes to the bucket list. The updated
-merge mechanism can be regarded as an “implementation detail”. Because of that, older versions will
-not error when processing new buckets. However, they will produce a different ledger hash,
-preventing them from reaching consensus, and making progress.
+This is a breaking change, even though there are no semantic changes to the
+bucket list. The updated merge mechanism can be regarded as an “implementation
+detail”. Because of that, older versions will not error when processing new
+buckets. However, they will produce a different ledger hash, preventing them
+from reaching consensus, and making progress.
-The new versions of stellar-core will be able to process old and new buckets, and pick the
-appropriate merge technique based on the version number in the ledger header.
+The new versions of stellar-core will be able to process old and new buckets,
+and pick the appropriate merge technique based on the version number in the
+ledger header.
## Test Cases
diff --git a/core/cap-0026.md b/core/cap-0026.md
index a02e332ef..1ae7d1433 100644
--- a/core/cap-0026.md
+++ b/core/cap-0026.md
@@ -17,40 +17,43 @@ This CAP disables the inflation mechanism.
## Motivation
-Inflation mechanism was originally planned as a simple way for users to support important
-ecosystem projects and keep the overall XLM supply slightly inflationary.
-
-- Currently, it doesn’t serve its original purpose, as users mostly prefer to claim the
-inflation payouts through the inflation pools instead of sponsoring ecosystem projects.
-As a result, inflation micropayouts from XLM pools generate a significant validators
-load and clog the network.
-- Payouts get more and more resource consuming for validators over time due to the total
-lumens circulation supply increase.
-- Once the validators vote for the significant base fee increase, such payouts became much
-less profitable for most lumen holders, and XLM pools will be forced to raise the minimum
-required account balance to more than 1000XLM to cover transaction fee loses.
-- Inflation payouts may lead to additional complications in some edge-cases when
-programming smart contracts.
-- Inflation deprecation opens new possibilities for the more effective targeted reward
-distributions that require complex logic, like stimulating DEX liquidity providers.
+Inflation mechanism was originally planned as a simple way for users to support
+important ecosystem projects and keep the overall XLM supply slightly
+inflationary.
+
+- Currently, it doesn’t serve its original purpose, as users mostly prefer to
+ claim the inflation payouts through the inflation pools instead of sponsoring
+ ecosystem projects. As a result, inflation micropayouts from XLM pools
+ generate a significant validators load and clog the network.
+- Payouts get more and more resource consuming for validators over time due to
+ the total lumens circulation supply increase.
+- Once the validators vote for the significant base fee increase, such payouts
+ became much less profitable for most lumen holders, and XLM pools will be
+ forced to raise the minimum required account balance to more than 1000XLM to
+  cover transaction fee losses.
+- Inflation payouts may lead to additional complications in some edge-cases
+ when programming smart contracts.
+- Inflation deprecation opens new possibilities for the more effective targeted
+ reward distributions that require complex logic, like stimulating DEX
+ liquidity providers.
## Abstract
-Turning off inflation requires a minor change in Stellar Core, namely `Inflation`
-operation behavior. At the same time, it can be implemented without XDR changes and
-breaking protocol changes.
+Turning off inflation requires a minor change in Stellar Core, namely
+`Inflation` operation behavior. At the same time, it can be implemented without
+XDR changes and breaking protocol changes.
## Specification
-This proposal requires a single Core behavior change.
-Transaction containing `Inflation` operation should always return
-`opNOT_SUPPORTED` result code.
+This proposal requires a single Core behavior change. Transaction containing
+`Inflation` operation should always return `opNOT_SUPPORTED` result code.
## Design Rationale
-The proposed approach does not require breaking protocol changes and allows turning on
-the inflation mechanism in the future if needed. Due to the simplicity of proposed changes,
-the implementation potentially requires minimum efforts.
+The proposed approach does not require breaking protocol changes and allows
+turning on the inflation mechanism in the future if needed. Due to the
+simplicity of proposed changes, the implementation potentially requires
+minimal effort.
## Security Concerns
@@ -58,4 +61,4 @@ None.
## Backwards Incompatibilities
-This CAP contains no breaking changes and is fully backward compatible.
\ No newline at end of file
+This CAP contains no breaking changes and is fully backward compatible.
diff --git a/core/cap-0027.md b/core/cap-0027.md
index b382699e5..a32c92cc9 100644
--- a/core/cap-0027.md
+++ b/core/cap-0027.md
@@ -12,52 +12,49 @@ Protocol version: 13
## Simple Summary
-A new type of Account ID includes a 64-bit memo ID. This memo ID has
-no effect on the semantics of operations or their authorization, but
-it facilitates multiplexing a single account across multiple users.
+A new type of Account ID includes a 64-bit memo ID. This memo ID has no effect
+on the semantics of operations or their authorization, but it facilitates
+multiplexing a single account across multiple users.
## Motivation
-A common pattern in the Stellar ecosystem is for services to share a
-single Stellar account ID across many users, relying on the memo ID to
-disambiguate incoming payments.
+A common pattern in the Stellar ecosystem is for services to share a single
+Stellar account ID across many users, relying on the memo ID to disambiguate
+incoming payments.
Experience shows that people frequently forget to include the memo ID,
-resulting in either lost funds or onerous support calls. Moreover,
-memo IDs are per transaction, not per occurrence of an account ID,
-which imposes restrictions on the use of multiplexed accounts. For
-example, it is not possible to include multiple payments to different
-multiplexed accounts in the same transaction. Similarly, it is not
-possible to refund payments from a multiplexed account ID, as the
-transaction's memo ID by convention describes only the destination,
-not the source of funds.
-
-By adding an optional memo ID to the account ID type, we make
-multiplexed accounts a first-class abstraction that can be used
-anywhere a normal account ID can be used.
+resulting in either lost funds or onerous support calls. Moreover, memo IDs are
+per transaction, not per occurrence of an account ID, which imposes
+restrictions on the use of multiplexed accounts. For example, it is not
+possible to include multiple payments to different multiplexed accounts in the
+same transaction. Similarly, it is not possible to refund payments from a
+multiplexed account ID, as the transaction's memo ID by convention describes
+only the destination, not the source of funds.
+
+By adding an optional memo ID to the account ID type, we make multiplexed
+accounts a first-class abstraction that can be used anywhere a normal account
+ID can be used.
### Goals Alignment
-First-class multiplexed accounts help scalability by eliminating an
-incentive for users to create many accounts when virtual accounts
-would suffice. They also significantly improve usability by
-addressing the pain point of support costs for users who forget memo
-IDs.
+First-class multiplexed accounts help scalability by eliminating an incentive
+for users to create many accounts when virtual accounts would suffice. They
+also significantly improve usability by addressing the pain point of support
+costs for users who forget memo IDs.
## Abstract
-A new type, `MuxedAccount`, replaces `AccountID` in many places. A
-`MuxedAccount` can contain either a plain Ed25519 public key, or a
-public key and a 64-bit subaccount ID. The subaccount ID has no
-effect on the semantics of transactions, but can be used by
-higher-layer software to multiplex a single stellar account among
-multiple users.
+A new type, `MuxedAccount`, replaces `AccountID` in many places. A
+`MuxedAccount` can contain either a plain Ed25519 public key, or a public key
+and a 64-bit subaccount ID. The subaccount ID has no effect on the semantics of
+transactions, but can be used by higher-layer software to multiplex a single
+stellar account among multiple users.
## Specification
The multiplexed account type is represented by a new XDR union:
-~~~ {.c}
+```{.c}
enum CryptoKeyType
{
KEY_TYPE_ED25519 = 0,
@@ -76,80 +73,74 @@ union MuxedAccount switch (CryptoKeyType type) {
uint256 ed25519;
} med25519;
};
-~~~
-
-The following fields, which were previously an `AccountID` or
-`AccountID*`, are now a `MuxedAccount` or `MuxedAccount*`
-(respectively):
-
-* `PaymentOp::destination`
-* `PathPaymentStrictReceiveOp::destination`
-* `PathPaymentStrictSendOp::destination`
-* `Operation::sourceAccount`
-* `Operation::destination` (for `ACCOUNT_MERGE`)
-* `Transaction::sourceAccount`
-* `FeeBumpTransaction::feeSource`
-
-Note, however, that this must not be implemented before CAP-0015 or
-CAP-0019 (which updates the transaction format), as these other CAPs
-depend on all existing transactions starting with 4 zero bytes (which
-will no longer be the case when the transaction's `sourceAccount` is a
-`MuxedAccount`).
+```
+
+The following fields, which were previously an `AccountID` or `AccountID*`, are
+now a `MuxedAccount` or `MuxedAccount*` (respectively):
+
+- `PaymentOp::destination`
+- `PathPaymentStrictReceiveOp::destination`
+- `PathPaymentStrictSendOp::destination`
+- `Operation::sourceAccount`
+- `Operation::destination` (for `ACCOUNT_MERGE`)
+- `Transaction::sourceAccount`
+- `FeeBumpTransaction::feeSource`
+
+Note, however, that this must not be implemented before CAP-0015 or CAP-0019
+(which updates the transaction format), as these other CAPs depend on all
+existing transactions starting with 4 zero bytes (which will no longer be the
+case when the transaction's `sourceAccount` is a `MuxedAccount`).
## Design Rationale
-A previous ecosystem-only proposal had too many limitations, including
-the inability to send payments to several virtual accounts, the
-inability to specify virtual accounts as a source, and increased
-cognitive load for developers. The CAP approach seems cleaner, with
-very little added complexity in the server.
+A previous ecosystem-only proposal had too many limitations, including the
+inability to send payments to several virtual accounts, the inability to
+specify virtual accounts as a source, and increased cognitive load for
+developers. The CAP approach seems cleaner, with very little added complexity
+in the server.
-The particular set of fields promoted to `MuxedAccount` were chosen to
-avoid any changes to the database. Hence, assets and offers are still
-associated with an `AccountID` rather than a `MuxedAccount`.
+The particular set of fields promoted to `MuxedAccount` were chosen to avoid
+any changes to the database. Hence, assets and offers are still associated with
+an `AccountID` rather than a `MuxedAccount`.
-As for the strkey update, there were 3 unused bits at the bottom of
-each version byte that do not affect the first character, so we
-decided to reserve these to facilitate future crypto agility. A
-different letter was chosen for multiplexed accounts because these are
-not completely interchangable with traditional `AccountID` values.
-(For instance, a multiplexed account cannot issue assets, only the
-base account can do so.)
+As for the strkey update, there were 3 unused bits at the bottom of each
+version byte that do not affect the first character, so we decided to reserve
+these to facilitate future crypto agility. A different letter was chosen for
+multiplexed accounts because these are not completely interchangeable with
+traditional `AccountID` values. (For instance, a multiplexed account cannot
+issue assets, only the base account can do so.)
## Backwards Incompatibilities
-All existing transactions will continue to be valid, but transactions
-with a `MuxedAccount` will not be parsable by old software. Hence,
-the best time to roll out this change will be at the same time as
-CAP-0015. Software that converts new-style transactions to old can
-also strip out the identifiers from `MuxedAccount` structures.
+All existing transactions will continue to be valid, but transactions with a
+`MuxedAccount` will not be parsable by old software. Hence, the best time to
+roll out this change will be at the same time as CAP-0015. Software that
+converts new-style transactions to old can also strip out the identifiers from
+`MuxedAccount` structures.
## Security Concerns
-Certain addresses that are identical may not look identical. This
-could confuse client software. For instance, sending an asset to a
-multiplexed account tied to the issuer will destroy the asset.
+Certain addresses that are identical may not look identical. This could confuse
+client software. For instance, sending an asset to a multiplexed account tied
+to the issuer will destroy the asset.
Code that uses a denylist to block specific accounts from receiving
-`AUTH\_REQUIRED` assets will need to ensure that multiplexed
-accounts cannot be used to bypass the denylist. This is not an
-issue for non-`AUTH\_REQUIRED` assets, as anyone can already evade
-such denylists by creating new accounts. There is no issue for
-allowlists, since failure to accommodate multiplexed accounts just
-prohibits people from using them, preserving the status quo ante.
-
-People commonly assume that base-32 decoding libraries will reject
-inputs with invalid lengths or invalid trailing bits, but this is
-often not the case, particularly when padding is disabled. If
-implementations forget to check this, they will parse invalid strkeys.
-This could lead to problems, particularly if strkeys are used as
-indexes in a hash table, where an attacker could create multiple
-entries for the same account.
+`AUTH\_REQUIRED` assets will need to ensure that multiplexed accounts cannot be
+used to bypass the denylist. This is not an issue for non-`AUTH\_REQUIRED`
+assets, as anyone can already evade such denylists by creating new accounts.
+There is no issue for allowlists, since failure to accommodate multiplexed
+accounts just prohibits people from using them, preserving the status quo ante.
+
+People commonly assume that base-32 decoding libraries will reject inputs with
+invalid lengths or invalid trailing bits, but this is often not the case,
+particularly when padding is disabled. If implementations forget to check this,
+they will parse invalid strkeys. This could lead to problems, particularly if
+strkeys are used as indexes in a hash table, where an attacker could create
+multiple entries for the same account.
## Implementation
-An implementation can be fetched to `FETCH_HEAD` with the following
-command:
+An implementation can be fetched to `FETCH_HEAD` with the following command:
```
git fetch git@github.com:xdrpp/stellar-core dm/muxacct
diff --git a/core/cap-0028.md b/core/cap-0028.md
index 85780aef7..a5e3da0ac 100644
--- a/core/cap-0028.md
+++ b/core/cap-0028.md
@@ -11,53 +11,113 @@ Protocol version: 13
```
## Simple Summary
-Pre-auth signers are only removed from the source account if signature verification on the transaction succeeds [during transaction application](#Caveat). The signer is left on the account otherwise. This proposal will remove the signer from the source account when the transaction is being applied, regardless of the outcome of that transaction.
+
+Pre-auth signers are only removed from the source account if signature
+verification on the transaction succeeds
+[during transaction application](#Caveat). The signer is left on the account
+otherwise. This proposal will remove the signer from the source account when
+the transaction is being applied, regardless of the outcome of that
+transaction.
## Motivation
-If the pre-auth transaction fails before signature verification succeeds, the pre-auth signer needs to be removed manually using the Set Options operation. The fee for that transaction will have already been consumed, so it should never be applied again. The pre-auth signer can never be used again due to this, so there's no reason to keep it around.
-CAP-0015 (Fee-Bump Transactions) also introduces behavior that can make it harder to clean up the obsolete pre-auth signers. If the outer fee bump transaction has an invalid signature, the inner transaction will still need to be applied. The pre-auth signer will not be removed due to the outer transaction failing, but the inner transaction can return `txSUCCESS`, so the account owner won't know to remove the signer.
+If the pre-auth transaction fails before signature verification succeeds, the
+pre-auth signer needs to be removed manually using the Set Options operation.
+The fee for that transaction will have already been consumed, so it should
+never be applied again. The pre-auth signer can never be used again due to
+this, so there's no reason to keep it around.
+
+CAP-0015 (Fee-Bump Transactions) also introduces behavior that can make it
+harder to clean up the obsolete pre-auth signers. If the outer fee bump
+transaction has an invalid signature, the inner transaction will still need to
+be applied. The pre-auth signer will not be removed due to the outer
+transaction failing, but the inner transaction can return `txSUCCESS`, so the
+account owner won't know to remove the signer.
### Goals Alignment
-The Stellar Network should facilitate simplicity and interoperability with other protocols and networks.
+
+The Stellar Network should facilitate simplicity and interoperability with
+other protocols and networks.
## Specification
-This proposal will add functionality to remove a pre-auth signer even if the signer has not been checked yet. A method will be added to `TransactionFrame` that will check every transaction and operation source account for the pre-auth signer, and remove it if found. This method will be called every time a transaction enters the **application stage** (even if the transaction fails for any reason).
+
+This proposal will add functionality to remove a pre-auth signer even if the
+signer has not been checked yet. A method will be added to `TransactionFrame`
+that will check every transaction and operation source account for the pre-auth
+signer, and remove it if found. This method will be called every time a
+transaction enters the **application stage** (even if the transaction fails for
+any reason).
### Caveat
-The existing functionality along with what's being added is only executed when the transaction is in the **application stage**. The application stage is where the transaction will be used to execute it's operations and modify the ledger. The transaction will not enter the application stage if it isn't accepted into the transaction set, which means the signatures on the source accounts of the transaction/operations won't be considered for removal. The transactions must have valid signatures and sequence number at the time of submission to be accepted into the transaction set. Here are some examples to make this clearer.
+
+The existing functionality along with what's being added is only executed when
+the transaction is in the **application stage**. The application stage is where
+the transaction will be used to execute its operations and modify the ledger.
+The transaction will not enter the application stage if it isn't accepted into
+the transaction set, which means the signatures on the source accounts of the
+transaction/operations won't be considered for removal. The transactions must
+have valid signatures and sequence number at the time of submission to be
+accepted into the transaction set. Here are some examples to make this clearer.
#### An example of signer NOT being removed on sequence number failure
+
1. Source account `B` with a pre-auth signer is at sequence number `X`.
-2. A transaction that corresponds to the signer on `B` is submitted for source account `B` with sequence number `X`.
-3. This sequence number is invalid, so the transaction will not be included in the transaction set to be applied. The signer will not be removed.
+2. A transaction that corresponds to the signer on `B` is submitted for source
+ account `B` with sequence number `X`.
+3. This sequence number is invalid, so the transaction will not be included in
+ the transaction set to be applied. The signer will not be removed.
#### An example of signer being removed on sequence number failure
+
1. Source account `B` with a pre auth signer is at sequence number `X`.
-2. A transaction for source account B is submitted with sequence number `X + 1`, with a `BUMP_SEQUENCE` operation that sets the sequence number to `X + 2`. This transaction makes it into the transaction set. **It hasn't been applied yet.**
-3. A transaction that corresponds to the signer on `B` is submitted for source account `B` with sequence number `X + 2`. This transaction makes it into the transaction set because the sequence number is right.
-4. Transaction set is being applied. The first transaction with the `BUMP_SEQUENCE` operation is applied succesfully, which sets the sequence number of source account `B` to `X + 2`.
-5. Now the second transaction is being applied. The sequence number check on the transaction will fail because it is equivalent to the sequence number on the account (both are `X + 2`). The signer will be removed in this case.
+2. A transaction for source account B is submitted with sequence number
+ `X + 1`, with a `BUMP_SEQUENCE` operation that sets the sequence number to
+ `X + 2`. This transaction makes it into the transaction set. **It hasn't
+ been applied yet.**
+3. A transaction that corresponds to the signer on `B` is submitted for source
+ account `B` with sequence number `X + 2`. This transaction makes it into the
+ transaction set because the sequence number is right.
+4. Transaction set is being applied. The first transaction with the
+   `BUMP_SEQUENCE` operation is applied successfully, which sets the sequence
+ number of source account `B` to `X + 2`.
+5. Now the second transaction is being applied. The sequence number check on
+ the transaction will fail because it is equivalent to the sequence number on
+ the account (both are `X + 2`). The signer will be removed in this case.
#### Removing signer on signature verification failure
-A similar scenario to the sequence number failure above can happen with a signature verification failure. To remove a signer on signature verification failure -
-1. Source account `B` has pre-auth signer with weight `1`. All thresholds on account are also set to `1`.
-2. A transaction set with two transactions for source account `B` are being applied.
- 1. Transaction #1 - `SET_OPTIONS` operation that sets all thresholds to some high value (say `255`)
- 2. Transaction #2 - Pre-auth transaction that corresponds to the signer that was added to `B`
-3. Transaction #2 will fail during application because the signer weight isn't high enough, and the pre-auth signer will be removed from account `B`.
+
+A similar scenario to the sequence number failure above can happen with a
+signature verification failure. To remove a signer on signature verification
+failure -
+
+1. Source account `B` has pre-auth signer with weight `1`. All thresholds on
+ account are also set to `1`.
+2. A transaction set with two transactions for source account `B` are being
+ applied.
+ 1. Transaction #1 - `SET_OPTIONS` operation that sets all thresholds to some
+ high value (say `255`)
+ 2. Transaction #2 - Pre-auth transaction that corresponds to the signer that
+ was added to `B`
+3. Transaction #2 will fail during application because the signer weight isn't
+ high enough, and the pre-auth signer will be removed from account `B`.
## Design Rationale
+
The proposed solution is simple and maintains backwards compatibility.
## Backwards Incompatibilities
+
None
## Security Concerns
-There are no security concerns here, as this is a small change that removes a signer from a source account.
+
+There are no security concerns here, as this is a small change that removes a
+signer from a source account.
## Test Cases
+
None yet
## Implementation
+
https://github.com/stellar/stellar-core/pull/2379
diff --git a/core/cap-0029.md b/core/cap-0029.md
index c2235314d..75769a81e 100644
--- a/core/cap-0029.md
+++ b/core/cap-0029.md
@@ -14,44 +14,85 @@ Protocol version: 16
```
## Simple Summary
+
This CAP addresses the following authorization semantics requirements:
-- An issuer should always be able to authorize a trustline regardless of any issuer flags.
-- An issuer should always be able to revoke a trustline for an asset that is set as revocable.
+
+- An issuer should always be able to authorize a trustline regardless of any
+ issuer flags.
+- An issuer should always be able to revoke a trustline for an asset that is
+ set as revocable.
## Working Group
-This protocol change was authored by Tomer Weller, with input from the consulted individuals
-mentioned at the top of this document.
+This protocol change was authored by Tomer Weller, with input from the
+consulted individuals mentioned at the top of this document.
## Motivation
-Trustline authorization is an important feature of the Stellar protocol. It allows issuers to handle various regulatory requirements. However, it's current behavior is not sensible with regards to configuration changes and revocable assets:
-- The authorize (`ALLOW_TRUST, Authorize=AUTHORIZED_FLAG`) operation fails if the issuer does not have any flags set, even when the trustline is unauthorized. This complicates asset configuration changes.
-- The revoke (`ALLOW_TRUST, Authorize=0` or `ALLOW_TRUST, Authorize=AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` when `AUTHORIZED_FLAG` is set) operation fails if the issuer does not have the `AUTH_REQUIRED` flag set. This defeats the purpose of having revocable assets that default to authorized trustlines ("Blacklist authorization").
+
+Trustline authorization is an important feature of the Stellar protocol. It
+allows issuers to handle various regulatory requirements. However, its current
+behavior is not sensible with regards to configuration changes and revocable
+assets:
+
+- The authorize (`ALLOW_TRUST, Authorize=AUTHORIZED_FLAG`) operation fails if
+ the issuer does not have any flags set, even when the trustline is
+ unauthorized. This complicates asset configuration changes.
+- The revoke (`ALLOW_TRUST, Authorize=0` or
+ `ALLOW_TRUST, Authorize=AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` when
+ `AUTHORIZED_FLAG` is set) operation fails if the issuer does not have the
+ `AUTH_REQUIRED` flag set. This defeats the purpose of having revocable assets
+ that default to authorized trustlines ("Blacklist authorization").
### Goals Alignment
+
This CAP is aligned with the following Stellar Network Goals:
-- The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Specification
-This CAP introduces two changes to `ALLOW_TRUST` semantics:
-- Allow `ALLOW_TRUST` operations that upgrade authorization (`Authorize=AUTHORIZED_FLAG` or `Authorize=AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` when the trustline is unauthorized) to be performed regardless of any issuer account flags.
-- Allow `ALLOW_TRUST` revoke (`ALLOW_TRUST, Authorize=0` or `ALLOW_TRUST, Authorize=AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` when `AUTHORIZED_FLAG` is set) operations to be performed on trustlines to assets that have the `AUTH_REVOCABLE` flag set, **regardless of whether or not their `AUTH_REQUIRED` flag is set**.
+
+This CAP introduces two changes to `ALLOW_TRUST` semantics:
+
+- Allow `ALLOW_TRUST` operations that upgrade authorization
+ (`Authorize=AUTHORIZED_FLAG` or
+ `Authorize=AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` when the trustline is
+ unauthorized) to be performed regardless of any issuer account flags.
+- Allow `ALLOW_TRUST` revoke (`ALLOW_TRUST, Authorize=0` or
+ `ALLOW_TRUST, Authorize=AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` when
+ `AUTHORIZED_FLAG` is set) operations to be performed on trustlines to assets
+ that have the `AUTH_REVOCABLE` flag set, **regardless of whether or not their
+ `AUTH_REQUIRED` flag is set**.
## Backwards Incompatibilities
-This change is backward compatible for sensible consumers. It is not backward compatible for consumers that rely on the above operations failing and returning the `ALLOW_TRUST_TRUST_NOT_REQUIRED` result code.
-## Design Rationale
+This change is backward compatible for sensible consumers. It is not backward
+compatible for consumers that rely on the above operations failing and
+returning the `ALLOW_TRUST_TRUST_NOT_REQUIRED` result code.
-[CAP-0035](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) introduces the `SET_TRUST_LINE_FLAGS` operation, which provides a better way to interface with the trustline flags. This operation will allow the authorization transitions specified in this CAP that currently return `ALLOW_TRUST_TRUST_NOT_REQUIRED`. While we could just leave `ALLOW_TRUST` the way it is and use `SET_TRUST_LINE_FLAGS` to get the desired behavior, making sure the possible authorization transitions are the same between operations would make it easier for the ecosystem to continue using `ALLOW_TRUST`. They can choose which operation to use, instead of being forced to switch. Keeping the logic the same will also make it easier for everyone to reason about the possible authorization transitions.
+## Design Rationale
+[CAP-0035](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md)
+introduces the `SET_TRUST_LINE_FLAGS` operation, which provides a better way to
+interface with the trustline flags. This operation will allow the authorization
+transitions specified in this CAP that currently return
+`ALLOW_TRUST_TRUST_NOT_REQUIRED`. While we could just leave `ALLOW_TRUST` the
+way it is and use `SET_TRUST_LINE_FLAGS` to get the desired behavior, making
+sure the possible authorization transitions are the same between operations
+would make it easier for the ecosystem to continue using `ALLOW_TRUST`. They
+can choose which operation to use, instead of being forced to switch. Keeping
+the logic the same will also make it easier for everyone to reason about the
+possible authorization transitions.
## Security Concerns
-There are no security concerns here. This CAP just allows the issuer to perform more sensible authorization transitions for trustlines.
+There are no security concerns here. This CAP just allows the issuer to perform
+more sensible authorization transitions for trustlines.
## Test Cases
+
//TBD
## Implementation
+
https://github.com/stellar/stellar-core/pull/2988
diff --git a/core/cap-0030.md b/core/cap-0030.md
index e6eb8c374..442be63f3 100644
--- a/core/cap-0030.md
+++ b/core/cap-0030.md
@@ -11,29 +11,60 @@ Protocol version: 13
```
## Simple Summary
-There are operations that unnecessarily load the issuer, and return an error result if the issuer of an asset was not found. These errors are not necessary, and this proposal aims to remove them.
+
+There are operations that unnecessarily load the issuer, and return an error
+result if the issuer of an asset was not found. These errors are not necessary,
+and this proposal aims to remove them.
## Motivation
-The following operation results indicate that an operation failed because the issuer of an asset does not exist: `PAYMENT_NO_ISSUER`, `PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER`, `PATH_PAYMENT_STRICT_SEND_NO_ISSUER`, `MANAGE_SELL_OFFER_SELL_NO_ISSUER`, `MANAGE_SELL_OFFER_BUY_NO_ISSUER`, `MANAGE_BUY_OFFER_SELL_NO_ISSUER`, `MANAGE_BUY_OFFER_BUY_NO_ISSUER`, and `CHANGE_TRUST_NO_ISSUER`. Each of the operations that can produce these results other than `ChangeTrustOp` does not otherwise depend on the existence of the issuer. The issuer is only required when creating a trustline with `ChangeTrustOp`. Removing the issuer check from all other operations will standardize the protocol on not requiring the issuer outside of this one case.
-Removing the issuer from the operations that produce the `*_NO_ISSUER` results will also prevent `stellar-core` developers from using the `AUTH_REQUIRED` flag incorrectly. If `AUTH_REQUIRED` is not set on the issuer, then the `TrustLine` will be authorized on it's creation. The name `AUTH_REQUIRED`, however, implies that the flag determines if authorization should be checked, which is not the actual logic.
+The following operation results indicate that an operation failed because the
+issuer of an asset does not exist: `PAYMENT_NO_ISSUER`,
+`PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER`, `PATH_PAYMENT_STRICT_SEND_NO_ISSUER`,
+`MANAGE_SELL_OFFER_SELL_NO_ISSUER`, `MANAGE_SELL_OFFER_BUY_NO_ISSUER`,
+`MANAGE_BUY_OFFER_SELL_NO_ISSUER`, `MANAGE_BUY_OFFER_BUY_NO_ISSUER`, and
+`CHANGE_TRUST_NO_ISSUER`. Each of the operations that can produce these results
+other than `ChangeTrustOp` does not otherwise depend on the existence of the
+issuer. The issuer is only required when creating a trustline with
+`ChangeTrustOp`. Removing the issuer check from all other operations will
+standardize the protocol on not requiring the issuer outside of this one case.
+
+Removing the issuer from the operations that produce the `*_NO_ISSUER` results
+will also prevent `stellar-core` developers from using the `AUTH_REQUIRED` flag
+incorrectly. If `AUTH_REQUIRED` is not set on the issuer, then the `TrustLine`
+will be authorized on its creation. The name `AUTH_REQUIRED`, however, implies
+that the flag determines if authorization should be checked, which is not the
+actual logic.
-This proposal will also make the CAP-0023 (Two-Part Payments) implementation simpler because we won't need to return the `*_NO_ISSUER` results there to maintain consistency.
+This proposal will also make the CAP-0023 (Two-Part Payments) implementation
+simpler because we won't need to return the `*_NO_ISSUER` results there to
+maintain consistency.
### Goals Alignment
-The Stellar Network should facilitate simplicity and interoperability with other protocols and networks.
+
+The Stellar Network should facilitate simplicity and interoperability with
+other protocols and networks.
## Specification
-For every operation that can return a `*_NO_ISSUER` result except `ChangeTrustOp`, don't load the issuer or return the `*_NO_ISSUER` result.
+
+For every operation that can return a `*_NO_ISSUER` result except
+`ChangeTrustOp`, don't load the issuer or return the `*_NO_ISSUER` result.
## Backwards Incompatibilities
-The `*_NO_ISSUER` results will no longer be returned for the affected operations, so anyone relying on these results will need to modify their behavior. These error results aren't meaningful anyways because anyone can create the issuer account to bypass the issuer check.
+
+The `*_NO_ISSUER` results will no longer be returned for the affected
+operations, so anyone relying on these results will need to modify their
+behavior. These error results aren't meaningful anyways because anyone can
+create the issuer account to bypass the issuer check.
## Security Concerns
+
None
## Test Cases
+
None yet
## Implementation
+
https://github.com/stellar/stellar-core/pull/2389
diff --git a/core/cap-0031.md b/core/cap-0031.md
index f0acb7036..5406c2d07 100644
--- a/core/cap-0031.md
+++ b/core/cap-0031.md
@@ -11,9 +11,11 @@ Protocol version: TBD
```
## Simple Summary
+
This proposal makes it possible to pay reserves for another account.
## Motivation
+
This proposal seeks to solve the following problem: an entity should be able to
provide the reserve for accounts controlled by other parties without giving
those parties control of the reserve.
@@ -30,32 +32,36 @@ the trust line and merging the account.
This proposal is in many ways analogous to CAP-0015:
- CAP-0015 makes it possible to pay transaction fees for other accounts without
-giving control of the underlying funds
+ giving control of the underlying funds
- CAP-0031 makes it possible to pay reserves for other accounts without giving
-control of the underlying funds
+ control of the underlying funds
-The combination of these two proposals should greatly facilitate the development
-of non-custodial uses of the Stellar Network.
+The combination of these two proposals should greatly facilitate the
+development of non-custodial uses of the Stellar Network.
### Goals Alignment
+
This proposal is aligned with the following Stellar Network Goal:
- The Stellar Network should make it easy for developers of Stellar projects to
create highly usable products.
## Abstract
+
We introduce `SponsorshipEntry` as a new type of `LedgerEntry` which represents
an offer to pay the reserve for a `LedgerEntry` described by `descriptor`. The
-operation `CreateSponsorshipOp` makes it possible to create a `SponsorshipEntry`
-whereas the operation `RemoveSponsorshipOp` makes it possible to remove a
-`SponsorshipEntry`. These operations are the only ways in which a
-`SponsorshipEntry` can be created or removed, and they are otherwise immutable.
+operation `CreateSponsorshipOp` makes it possible to create a
+`SponsorshipEntry` whereas the operation `RemoveSponsorshipOp` makes it
+possible to remove a `SponsorshipEntry`. These operations are the only ways in
+which a `SponsorshipEntry` can be created or removed, and they are otherwise
+immutable.
## Specification
### XDR
#### AccountEntry
+
```c++
struct AccountEntry
{
@@ -85,6 +91,7 @@ struct AccountEntry
```
#### SponsorshipEntry
+
```c++
struct LedgerEntryType
{
@@ -215,6 +222,7 @@ case SPONSORSHIP:
```
#### Operations
+
```c++
enum OperationType
{
@@ -256,6 +264,7 @@ struct Operation
```
#### Operation Results
+
```c++
enum CreateSponsorshipResultCode
{
@@ -311,38 +320,41 @@ struct OperationResult
### Semantics
#### Available Balance and Limit of Native Asset
+
This proposal changes the definition of available balance of native asset to:
`balance - (2 + numSubEntries - sponsoredReserves) * baseReserve`. The
definition of available limit of native asset is unchanged, and remains
`INT64_MAX - balance`.
#### CreateSponsorshipOp
-A `SponsorshipEntry` can only be created by the `CreateSponsorshipOp` operation.
-`CreateSponsorshipOp` is invalid with `CREATE_SPONSORSHIP_MALFORMED` if
+
+A `SponsorshipEntry` can only be created by the `CreateSponsorshipOp`
+operation. `CreateSponsorshipOp` is invalid with `CREATE_SPONSORSHIP_MALFORMED`
+if
- `descriptor.type() == TRUSTLINE` and any of
- - `descriptor.trustLine().asset` is of type `ASSET_TYPE_NATIVE`
- - `descriptor.trustLine().asset` is invalid
+ - `descriptor.trustLine().asset` is of type `ASSET_TYPE_NATIVE`
+ - `descriptor.trustLine().asset` is invalid
- `descriptor.type() == OFFER` and any of
- - `descriptor.offer().buying` is invalid
- - `descriptor.offer().selling` is invalid
- - `descriptor.offer().buying == descriptor.offer().selling`
+ - `descriptor.offer().buying` is invalid
+ - `descriptor.offer().selling` is invalid
+ - `descriptor.offer().buying == descriptor.offer().selling`
- `descriptor.type() == DATA` and
- - `descriptor.data().dataName` is empty or invalid
+ - `descriptor.data().dataName` is empty or invalid
The behavior of `CreateSponsorshipOp` is as follows:
1. Calculate `Multiplier` as
- - 3 if `descriptor.type() == ACCOUNT`
- - 2 otherwise
+ - 3 if `descriptor.type() == ACCOUNT`
+ - 2 otherwise
2. Fail with `CREATE_SPONSORSHIP_LOW_RESERVE` if the `sourceAccount` does not
have at least `Multiplier * baseReserve` available balance of native asset
3. Deduct `Multiplier * baseReserve` of native asset from `sourceAccount`
4. Create a `SponsorshipEntry` as `sponsorship` with the following properties:
- - `sponsorship.createdBy = sourceAccount`
- - `sponsorship.sponsorshipID` as the next available
- - `sponsorship.descriptor = descriptor`
- - `sponsorship.reserve = Multiplier * baseReserve`
+ - `sponsorship.createdBy = sourceAccount`
+ - `sponsorship.sponsorshipID` as the next available
+ - `sponsorship.descriptor = descriptor`
+ - `sponsorship.reserve = Multiplier * baseReserve`
5. Count the number of existing `LedgerEntry` described by `descriptor` as
`Sponsorable`
6. Count the number of existing `SponsorshipEntry` with
@@ -350,11 +362,11 @@ The behavior of `CreateSponsorshipOp` is as follows:
`sponsorshipID < sponsorship.sponsorshipID` as `Sponsors`
7. Succeed with `CREATE_SPONSORSHIP_SUCCESS` if `Sponsors >= Sponsorable`
8. Let `SponsoredAccount` be
- - `descriptor.account().accountID` if `descriptor.type() == ACCOUNT`
- - `descriptor.signer().accountID` if `descriptor.type() == SIGNER`
- - `descriptor.trustLine().accountID` if `descriptor.type() == TRUSTLINE`
- - `descriptor.offer().sellerID` if `descriptor.type() == OFFER`
- - `descriptor.data().accountID` if `descriptor.type() == DATA`
+ - `descriptor.account().accountID` if `descriptor.type() == ACCOUNT`
+ - `descriptor.signer().accountID` if `descriptor.type() == SIGNER`
+ - `descriptor.trustLine().accountID` if `descriptor.type() == TRUSTLINE`
+ - `descriptor.offer().sellerID` if `descriptor.type() == OFFER`
+ - `descriptor.data().accountID` if `descriptor.type() == DATA`
9. Succeed with `CREATE_SPONSORSHIP_SUCCESS` if `SponsoredAccount` does not
exist
10. Load `SponsoredAccount` and increment `SponsoredAccount.sponsoredReserves`
@@ -365,8 +377,10 @@ The behavior of `CreateSponsorshipOp` is as follows:
funds.
#### RemoveSponsorshipOp
-A `SponsorshipEntry` can only be removed by the `RemoveSponsorshipOp` operation.
-`RemoveSponsorshipOp` is invalid with `REMOVE_SPONSORSHIP_MALFORMED` if
+
+A `SponsorshipEntry` can only be removed by the `RemoveSponsorshipOp`
+operation. `RemoveSponsorshipOp` is invalid with `REMOVE_SPONSORSHIP_MALFORMED`
+if
- `sponsorshipID <= 0`
@@ -377,28 +391,30 @@ The behavior of `RemoveSponsorshipOp` is as follows:
2. Load the `SponsorshipEntry` as `sponsorship`
3. Fail with `REMOVE_SPONSORSHIP_NOT_CREATOR` if
`sponsorship.createdBy != sourceAccount`
-3. Count the number of existing `LedgerEntry` described by `descriptor` as
+4. Count the number of existing `LedgerEntry` described by `descriptor` as
`Sponsorable`
-4. Count the number of existing `SponsorshipEntry` with
+5. Count the number of existing `SponsorshipEntry` with
`descriptor = sponsorship.descriptor` and
`sponsorshipID < sponsorship.sponsorshipID` as `Sponsors`
-5. Fail with `REMOVE_SPONSORSHIP_IN_USE` if `Sponsorable > Sponsors`
-6. Fail with `REMOVE_SPONSORSHIP_LINE_FULL` if the `sourceAccount` does not have
- `reserve` available limit of native asset
-7. Add `reserve` of native asset to `sourceAccount`
-8. Remove `sponsorship`
-9. Succeed with `REMOVE_SPONSORSHIP_SUCCESS`
+6. Fail with `REMOVE_SPONSORSHIP_IN_USE` if `Sponsorable > Sponsors`
+7. Fail with `REMOVE_SPONSORSHIP_LINE_FULL` if the `sourceAccount` does not
+ have `reserve` available limit of native asset
+8. Add `reserve` of native asset to `sourceAccount`
+9. Remove `sponsorship`
+10. Succeed with `REMOVE_SPONSORSHIP_SUCCESS`
`RemoveSponsorshipOp` requires medium threshold because it is related to
`CreateSponsorshipOp`.
#### CreateAccountOp
+
We now return `CREATE_ACCOUNT_LOW_RESERVE` conditionally
1. ...
2. Skip to step 4 if a `SponsorshipEntry` with `descriptor.type() == ACCOUNT`
and `descriptor.account().accountID = destination` exists
-3. Fail with `CREATE_ACCOUNT_LOW_RESERVE` if `startingBalance < 2 * baseReserve`
+3. Fail with `CREATE_ACCOUNT_LOW_RESERVE` if
+ `startingBalance < 2 * baseReserve`
4. ...
When creating the account, we now
@@ -410,12 +426,13 @@ When creating the account, we now
3. ...
#### SetOptionsOp
+
We now return `SET_OPTIONS_LOW_RESERVE` conditionally
1. ...
2. Count the number of signers on `sourceAccount` as `Signers`
-3. Count the number of `SponsorshipEntry` with `descriptor.type() == SIGNER` and
- `descriptor.signer().accountID = sourceAccount` as `Sponsors`
+3. Count the number of `SponsorshipEntry` with `descriptor.type() == SIGNER`
+ and `descriptor.signer().accountID = sourceAccount` as `Sponsors`
4. Skip to step 7 if `Signers >= Sponsors`
5. Increment `sourceAccount.sponsoredReserves`
6. Skip to step 8
@@ -428,28 +445,31 @@ When deleting a signer, we now
1. ...
2. Count the number of signers on `sourceAccount` as `Signers`
-3. Count the number of `SponsorshipEntry` with `descriptor.type() == SIGNER` and
- `descriptor.signer().accountID = sourceAccount` as `Sponsors`
+3. Count the number of `SponsorshipEntry` with `descriptor.type() == SIGNER`
+ and `descriptor.signer().accountID = sourceAccount` as `Sponsors`
4. Decrement `sourceAccount.sponsoredReserves` if `Signers <= Sponsors`
5. Decrement `sourceAccount.numSubEntries`
6. ...
#### Removing Used One-Time Signers
+
When deleting a signer, we now
1. ...
2. Count the number of signers on `sourceAccount` as `Signers`
-3. Count the number of `SponsorshipEntry` with `descriptor.type() == SIGNER` and
- `descriptor.signer().accountID = sourceAccount` as `Sponsors`
+3. Count the number of `SponsorshipEntry` with `descriptor.type() == SIGNER`
+ and `descriptor.signer().accountID = sourceAccount` as `Sponsors`
4. Decrement `sourceAccount.sponsoredReserves` if `Signers <= Sponsors`
5. Decrement `sourceAccount.numSubEntries`
6. ...
#### ChangeTrustOp
+
We now return `CHANGE_TRUST_LOW_RESERVE` conditionally
1. ...
-2. Skip to step 5 if a `SponsorshipEntry` with `descriptor.type() == TRUSTLINE`,
+2. Skip to step 5 if a `SponsorshipEntry` with
+ `descriptor.type() == TRUSTLINE`,
`descriptor.trustLine().accountID = sourceAccount`, and
`descriptor.trustLine().asset = asset` does not exist
3. Increment `sourceAccount.sponsoredReserves`
@@ -470,26 +490,28 @@ When deleting a trust line, we now
4. ...
#### AllowTrustOp
+
When deleting offers after revoking authorization, we now
1. ...
-2. For each asset pair `(Buying, Selling)` of an offer that was deleted
- a. Count the number of offers that were deleted as `OffersDeleted`
- b. Count the number of `SponsorshipEntry` with `descriptor.type() == OFFER`,
- `descriptor.offer().sellerID = trustor`,
- `descriptor.offer().buying = Buying`, and
- `descriptor.offer().selling = Selling` as `Sponsors`
- c. Decrement `trustor.sponsoredReserves` by `min(OffersDeleted, Sponsors)`
- d. Decrement `trustor.numSubEntries` by `OffersDeleted`
-5. ...
+2. For each asset pair `(Buying, Selling)` of an offer that was deleted
+   a. Count the number of offers that were deleted as `OffersDeleted`
+   b. Count the number of `SponsorshipEntry` with `descriptor.type() == OFFER`,
+      `descriptor.offer().sellerID = trustor`,
+      `descriptor.offer().buying = Buying`, and
+      `descriptor.offer().selling = Selling` as `Sponsors`
+   c. Decrement `trustor.sponsoredReserves` by `min(OffersDeleted, Sponsors)`
+   d. Decrement `trustor.numSubEntries` by `OffersDeleted`
+3. ...
#### ManageSellOfferOp, ManageBuyOfferOp, and CreatePassiveSellOfferOp
+
When releasing liabilities before modifying an existing offer with asset pair
`(Buying, Selling)`, we now
1. ...
-2. Count the number of offers with `sellerID = sourceAccount`, `buying = Buying`,
- and `selling = Selling` as `Sponsorable`
+2. Count the number of offers with `sellerID = sourceAccount`,
+ `buying = Buying`, and `selling = Selling` as `Sponsorable`
3. Count the number of `SponsorshipEntry` with `descriptor.type() == OFFER`,
`descriptor.offer().sellerID = sourceAccount`,
`descriptor.offer().buying = Buying`, and
@@ -499,12 +521,12 @@ When releasing liabilities before modifying an existing offer with asset pair
6. ...
When computing the amount of `Buying` that can be bought and the amount of
-`Selling` that can be sold (with `Buying` and `Selling` not necessarily equal to
-the above), we now
+`Selling` that can be sold (with `Buying` and `Selling` not necessarily equal
+to the above), we now
1. ...
-2. Count the number of offers with `sellerID = sourceAccount`, `buying = Buying`,
- and `selling = Selling` as `Sponsorable`
+2. Count the number of offers with `sellerID = sourceAccount`,
+ `buying = Buying`, and `selling = Selling` as `Sponsorable`
3. Count the number of `SponsorshipEntry` with `descriptor.type() == OFFER`,
`descriptor.offer().sellerID = sourceAccount`,
`descriptor.offer().buying = Buying`, and
@@ -531,10 +553,11 @@ adjusted to 0, we now
6. ...
When creating the offer, we now follow the same process as for computing the
-amount that can be bought and sold. Note that the failure in step 6 should never
-happen in this case.
+amount that can be bought and sold. Note that the failure in step 6 should
+never happen in this case.
#### PathPaymentStrictSend and PathPaymentStrictReceive
+
When erasing an offer that was either taken entirely or partially taken and
adjusted to 0, we now
@@ -550,6 +573,7 @@ adjusted to 0, we now
6. ...
#### Increasing the Base Reserve
+
When preparing to update offers, the balance above reserve should be calculated
as `balance - (2 + numSubEntries - sponsoredReserves) * baseReserve`.
@@ -567,6 +591,7 @@ When erasing an offer or adjusting an offer to 0, we now
6. ...
#### ManageDataOp
+
We now return `MANAGE_DATA_LOW_RESERVE` conditionally
1. ...
@@ -592,6 +617,7 @@ When deleting data, we now
## Design Rationale
### How are sponsorship entries paired with the ledger entries they sponsor?
+
Sponsorship entries are paired with the ledger entries they sponsor implicitly.
Sponsorship entries do not record what ledger entry they are currently
sponsoring, nor do ledger entries record what sponsorship entry is currently
@@ -620,6 +646,7 @@ created, this is equivalent to the `N` oldest sponsorship entries that still
exist.
### Why do sponsorship entries sponsor reserve for only a single ledger entry?
+
It is reasonable to think that a single sponsorship entry might be able to
sponsor reserves for multiple ledger entries, such as a single sponsorship
entry sponsoring up to 5 signers. In fact, the earliest drafts of this proposal
@@ -635,38 +662,42 @@ sponsorship entry can actually sponsor reserves for multiple ledger entries.
This led conceptually to two categories of sponsorship entries, which increased
the complexity of both the mental model and the implementation.
-Second, it led to the introduction of a `ManageSponsorshipOp` operation
-that was able to modify the number of reserves that a given sponsorship entry
-could sponsor. This was required because it is not permitted to delete a
-sponsorship entry that is providing reserves, so without the operation a
-sponsorship entry that was configured to provide 10 reserves but was actually
-only providing 1 could not be modified to release the other 9 reserves. The
-semantics of this operation were quite complex, since it contained aspects of
-both `CreateSponsorshipOp` and `RemoveSponsorshipOp`.
+Second, it led to the introduction of a `ManageSponsorshipOp` operation that
+was able to modify the number of reserves that a given sponsorship entry could
+sponsor. This was required because it is not permitted to delete a sponsorship
+entry that is providing reserves, so without the operation a sponsorship entry
+that was configured to provide 10 reserves but was actually only providing 1
+could not be modified to release the other 9 reserves. The semantics of this
+operation were quite complex, since it contained aspects of both
+`CreateSponsorshipOp` and `RemoveSponsorshipOp`.
### Sponsorship entries always provide a full reserve
+
Suppose there exists an entity that creates accounts and trust lines for its
-clients, using sponsorship entry to retain control of the reserve. At some point
-in the future, the base reserve is increased. If the sponsorship entry provided
-an actual amount of reserve based on the base reserve when it was created,
-rather than a full reserve unconditionally, then the entity would now need to
-increase the sponsorship for every client. For a large entity, this could be
-many thousands of sponsorships to manage which would take time. In the interim,
-their service would be degraded and the user experience for their clients
-greatly harmed. The solution in this proposal avoids this issue entirely.
+clients, using sponsorship entry to retain control of the reserve. At some
+point in the future, the base reserve is increased. If the sponsorship entry
+provided an actual amount of reserve based on the base reserve when it was
+created, rather than a full reserve unconditionally, then the entity would now
+need to increase the sponsorship for every client. For a large entity, this
+could be many thousands of sponsorships to manage which would take time. In the
+interim, their service would be degraded and the user experience for their
+clients greatly harmed. The solution in this proposal avoids this issue
+entirely.
### Accounts record the total number of sponsored reserves
+
Theoretically, it is not necessary for accounts to record the total number of
sponsored reserves. The data is purely derived and can always be calculated at
any time. That being said, calculating it on demand is not particularly easy
-because it requires iterating over all of the sub-entries of the account.
-It is therefore likely that any performant implementation would store this data
+because it requires iterating over all of the sub-entries of the account. It is
+therefore likely that any performant implementation would store this data
either persistently or in a large-scale cache. But if the data is going to be
stored anyway, then it might as well be recorded in the ledger where it can be
utilized by downstream systems. Exactly the same arguments apply, for example,
to liabilities and `numSubEntries`.
## Backwards Incompatibilities
+
All downstream systems will need updated XDR in order to recognize the new
operations and ledger entries.
@@ -675,10 +706,13 @@ potentially calculate a value that is too high. While inconvenient, this should
not cause any systems to fail.
## Security Concerns
+
None.
## Test Cases
+
None yet.
## Implementation
+
None yet.
diff --git a/core/cap-0032.md b/core/cap-0032.md
index 859dca62c..d9e42f137 100644
--- a/core/cap-0032.md
+++ b/core/cap-0032.md
@@ -11,9 +11,11 @@ Protocol version: TBD
```
## Simple Summary
+
This proposal makes it possible to authorize a trust line before it is created.
## Motivation
+
This proposal seeks to solve the following problem: an issuer should be able to
authorize a trust line without waiting for the trust line to be created.
@@ -22,10 +24,11 @@ an account that does not have a trust line for the asset. With the current
version of the protocol, the issuer will need to wait until the account has
created a trust line before authorizing the trust line. This presents friction
to the issuer, which must now employ additional machinery to either monitor the
-ledger or use pre-signed/pre-authorized transactions. In either case, complexity
-is increased for an operation fundamental to the network.
+ledger or use pre-signed/pre-authorized transactions. In either case,
+complexity is increased for an operation fundamental to the network.
### Goals Alignment
+
This proposal is aligned with several Stellar Network Goals, among them:
- The Stellar Network should make it easy for developers of Stellar projects to
@@ -33,25 +36,27 @@ This proposal is aligned with several Stellar Network Goals, among them:
- The Stellar Network should enable cross-border payments, i.e. payments via
exchange of assets, throughout the globe, enabling users to make payments
between assets in a manner that is fast, cheap, and highly usable.
- - In support of this, the Stellar Network should enable asset issuance, but
- as a means of enabling cross-border payments.
+ - In support of this, the Stellar Network should enable asset issuance, but
+ as a means of enabling cross-border payments.
## Abstract
+
We introduce `PreauthorizationEntry` as a new type of `LedgerEntry` which
represents authorization to hold an asset, potentially before the corresponding
trust line has been created. The operation `CreatePreauthorizationOp` makes it
possible to create a `PreauthorizationEntry` whereas the operation
-`RemovePreauthorizationOp` makes it possible to remove a `PreauthorizationEntry`.
-When creating a trust line with `ChangeTrustOp`, the `flags` will now be set
-from a corresponding `PreauthorizationEntry` if it exists. `AllowTrustOp` will
-also be updated to modify just a trust line, just a `PreauthorizationEntry`, or
-both depending on which exist.
+`RemovePreauthorizationOp` makes it possible to remove a
+`PreauthorizationEntry`. When creating a trust line with `ChangeTrustOp`, the
+`flags` will now be set from a corresponding `PreauthorizationEntry` if it
+exists. `AllowTrustOp` will also be updated to modify just a trust line, just a
+`PreauthorizationEntry`, or both depending on which exist.
## Specification
### XDR
#### Refactor AllowTrustOp
+
```c++
union NonNativeAssetCode switch (AssetType type)
{
@@ -74,6 +79,7 @@ struct AllowTrustOp
```
#### PreauthorizationEntry
+
```c++
enum LedgerEntryType
{
@@ -139,6 +145,7 @@ case PREAUTHORIZATION:
```
#### Operations
+
```c++
enum OperationType
{
@@ -186,6 +193,7 @@ struct Operation
```
#### Operation Results
+
```c++
enum CreatePreauthorizationResultCode
{
@@ -240,6 +248,7 @@ struct OperationResult
### Semantics
#### CreatePreauthorizationOp
+
A `PreauthorizationEntry` can only be created by the `CreatePreauthorizationOp`
operation. `CreatePreauthorizationOp` is invalid with
`CREATE_PREAUTHORIZATION_MALFORMED` if `asset` is invalid.
@@ -254,10 +263,10 @@ The behavior of `CreatePreauthorizationOp` is as follows:
3. Deduct `baseReserve` of native asset from `sourceAccount`
4. Create a `PreauthorizationEntry` as `preauthorization` with the following
properties:
- - `preauthorization.accountID = accountID`
- - `preauthorization.asset = asset`
- - `preauthorization.flags = 0` if `sourceAccount.flags & AUTH_REQUIRED_FLAG`
- and `preauthorization.flags = AUTHORIZED_FLAG` otherwise
+ - `preauthorization.accountID = accountID`
+ - `preauthorization.asset = asset`
+ - `preauthorization.flags = 0` if `sourceAccount.flags & AUTH_REQUIRED_FLAG`
+ and `preauthorization.flags = AUTHORIZED_FLAG` otherwise
5. If a trust line `tl` with the specified `accountID` and `asset` exists, then
set `preauthorization.flags = tl.flags`
6. Succeed with `CREATE_PREAUTHORIZATION_SUCCESS`
@@ -266,6 +275,7 @@ The behavior of `CreatePreauthorizationOp` is as follows:
send funds.
#### RemovePreauthorizationOp
+
A `PreauthorizationEntry` can only be removed by the `RemovePreauthorizationOp`
operation. `RemovePreauthorizationOp` is invalid with
`REMOVE_PREAUTHORIZATION_MALFORMED` if `asset` is invalid.
@@ -274,8 +284,8 @@ The behavior of `RemovePreauthorizationOp` is as follows:
1. Fail with `REMOVE_PREAUTHORIZATION_DOES_NOT_EXIST` if there does not exist a
`PreauthorizationEntry` with the specified `accountID` and `asset`
-2. Fail with `REMOVE_PREAUTHORIZATION_LINE_FULL` if the `sourceAccount` does not
- have at least `reserve` available limit of native asset
+2. Fail with `REMOVE_PREAUTHORIZATION_LINE_FULL` if the `sourceAccount` does
+ not have at least `reserve` available limit of native asset
3. Add `reserve` of native asset to `sourceAccount`
4. Remove the specified `PreauthorizationEntry`
5. Succeed with `REMOVE_PREAUTHORIZATION_SUCCESS`
@@ -284,6 +294,7 @@ The behavior of `RemovePreauthorizationOp` is as follows:
send funds.
#### ChangeTrustOp
+
The behavior of `ChangeTrustOp` is unchanged except when creating a trust line.
In this case, we add one additional step immediately before success:
@@ -293,6 +304,7 @@ In this case, we add one additional step immediately before success:
3. Succeed with `CHANGE_TRUST_SUCCESS`
#### AllowTrustOp
+
The behavior of `AllowTrustOp` must be modified to maintain the invariant that
a trust line and a preauthorization entry for the same `accountID` and `asset`
must have the same `flags`.
@@ -306,8 +318,8 @@ and preauthorization entry do not exist:
3. Fail with `ALLOW_TRUST_NO_TRUST_LINE` if neither exists
4. ...
-Second, immediately before success we must set the `flags` on the trust line and
-the preauthorization entry if they exist:
+Second, immediately before success we must set the `flags` on the trust line
+and the preauthorization entry if they exist:
1. ...
2. If the trust line exists with the `sourceAccount` and specified `asset`
@@ -319,32 +331,39 @@ the preauthorization entry if they exist:
## Design Rationale
### PreauthorizationEntry is not a sub-entry
+
Each `PreauthorizationEntry` exists as an independent entity on the ledger. It
is clear that a `PreauthorizationEntry` cannot be a sub-entry of `accountID`,
-because it is a security risk for accounts to be able to add
-sub-entries to other accounts. But why should these entries be independent
-entities on the ledger rather than sub-entries of the accounts that created
-them? The main benefit of this design is that issuers are not limited in the
-number of preauthorization entries they can create.
+because it is a security risk for accounts to be able to add sub-entries to
+other accounts. But why should these entries be independent entities on the
+ledger rather than sub-entries of the accounts that created them? The main
+benefit of this design is that issuers are not limited in the number of
+preauthorization entries they can create.
### TrustLines and Preauthorizations Can Exist Simultaneously
+
This proposal makes it possible for a trust line and a preauthorization entry
with the same `accountID` and `asset` to exist simultaneously. We avoid any
ambiguity in the authorization state by guaranteeing that if both do exist
-simultaneously, then they must have the same `flags`. It is possible to maintain
-this property because there is at most one trust line and one preauthorization
-entry for a given `accountID` and `asset`.
+simultaneously, then they must have the same `flags`. It is possible to
+maintain this property because there is at most one trust line and one
+preauthorization entry for a given `accountID` and `asset`.
## Backwards Incompatibilities
+
All downstream systems will need updated XDR in order to recognize the new
operations and ledger entries.
## Security Concerns
-This proposal will slightly reduce the efficacy of base reserve changes, because
-a `PreauthorizationEntry` that has insufficient reserve is still usable.
+
+This proposal will slightly reduce the efficacy of base reserve changes,
+because a `PreauthorizationEntry` that has insufficient reserve is still
+usable.
## Test Cases
+
None yet.
## Implementation
+
None yet.
diff --git a/core/cap-0033.md b/core/cap-0033.md
index f83d53e3e..87005b73a 100644
--- a/core/cap-0033.md
+++ b/core/cap-0033.md
@@ -12,9 +12,11 @@ Protocol version: 14/15
```
## Simple Summary
+
This proposal makes it possible to pay reserves for another account.
## Motivation
+
This proposal seeks to solve the following problem: an entity should be able to
provide the reserve for accounts controlled by other parties without giving
those parties control of the reserve.
@@ -31,39 +33,43 @@ the trust line and merging the account.
This proposal is in many ways analogous to CAP-0015:
- CAP-0015 makes it possible to pay transaction fees for other accounts without
-giving control of the underlying funds
+ giving control of the underlying funds
- CAP-0033 makes it possible to pay reserves for other accounts without giving
-control of the underlying funds
+ control of the underlying funds
-The combination of these two proposals should greatly facilitate the development
-of non-custodial uses of the Stellar Network.
+The combination of these two proposals should greatly facilitate the
+development of non-custodial uses of the Stellar Network.
### Goals Alignment
+
This proposal is aligned with the following Stellar Network Goal:
- The Stellar Network should make it easy for developers of Stellar projects to
create highly usable products.
## Abstract
+
We introduce the sponsoring-future-reserves relation, in which an account (the
-sponsoring account) pays any reserve that another account (the sponsored account)
-would have to pay. This relation is initiated by `BeginSponsoringFutureReservesOp`,
-where the **sponsoring** account is the source account, and is terminated by
-`EndSponsoringFutureReservesOp`, where the **sponsored** account is the source
-account. Both operations must appear in a single transaction, which guarantees
-that both the sponsoring and sponsored accounts agree to every sponsorship. We
-also introduce `RevokeSponsorshipOp`, which can be used to modify the
-sponsorship of existing ledger entries and signers. To support this, we add new
-extensions to `AccountEntry` and `LedgerEntry` which record pertinent
-information about sponsorships.
+sponsoring account) pays any reserve that another account (the sponsored
+account) would have to pay. This relation is initiated by
+`BeginSponsoringFutureReservesOp`, where the **sponsoring** account is the
+source account, and is terminated by `EndSponsoringFutureReservesOp`, where the
+**sponsored** account is the source account. Both operations must appear in a
+single transaction, which guarantees that both the sponsoring and sponsored
+accounts agree to every sponsorship. We also introduce `RevokeSponsorshipOp`,
+which can be used to modify the sponsorship of existing ledger entries and
+signers. To support this, we add new extensions to `AccountEntry` and
+`LedgerEntry` which record pertinent information about sponsorships.
## Specification
+
This specification assumes CAP-0023, in order to show how sponsorships would
work for claimable balance entries.
### XDR
#### AccountEntry
+
```c++
typedef AccountID* SponsorshipDescriptor;
@@ -117,8 +123,9 @@ struct AccountEntry
```
#### ClaimableBalanceEntry
-Note that `ClaimableBalanceEntry` is not in the current protocol, so the XDR can
-still be modified. `reserve` has been removed, and `sponsoringID` has been
+
+Note that `ClaimableBalanceEntry` is not in the current protocol, so the XDR
+can still be modified. `reserve` has been removed, and `sponsoringID` has been
replaced by `sponsoringID` in `LedgerEntryExtensionV1`.
```c++
@@ -147,6 +154,7 @@ struct ClaimableBalanceEntry
```
#### LedgerEntry
+
```c++
struct LedgerEntryExtensionV1
{
@@ -178,6 +186,7 @@ struct LedgerEntry
```
#### Operations
+
```c++
enum OperationType
{
@@ -238,6 +247,7 @@ struct Operation
```
#### Operation Results
+
```c++
enum AccountMergeResultCode
{
@@ -329,6 +339,7 @@ default:
```
#### Transaction Results
+
```c++
enum TransactionResultCode
{
@@ -357,24 +368,29 @@ struct InnerTransactionResult
### Semantics
#### Reserve Requirement
+
No operation can cause an account to have
`balance < (2 + numSubEntries + numSponsoring - numSponsored) * baseReserve + liabilities.selling`.
#### Sponsoring-Future-Reserves
+
When account `A` is sponsoring-future-reserves for account `B`, any reserve
requirements that would normally accumulate on `B` will instead accumulate on
-`A`. This is achieved by updating `A.numSponsoring` and `B.numSponsored` (unless
-the reserve requirement is accumulating due to a `ClaimableBalance` entry).
+`A`. This is achieved by updating `A.numSponsoring` and `B.numSponsored`
+(unless the reserve requirement is accumulating due to a `ClaimableBalance`
+entry).
An account `A` begins sponsoring-future-reserves for an account `B` upon a
successful `BeginSponsoringFutureReservesOp` with `sourceAccount = A` and
`sponsoredID = B`. `A` stops sponsoring-future-reserves for `B` upon a
-successful `EndSponsoringFutureReserves` with `sourceAccount = B`. These are the
-only two operations which can impact the sponsoring-future-reserves state of an
-account.
+successful `EndSponsoringFutureReserves` with `sourceAccount = B`. These are
+the only two operations which can impact the sponsoring-future-reserves state
+of an account.
#### Sponsoring-Future-Reserves Invariants
+
There are two invariants related to sponsoring-future-reserves:
+
- If an account `A` is sponsoring-future-reserves for an account `B`, then `B`
is not sponsoring-future-reserves for any account. Specifically, this
prevents an account from sponsoring-future-reserves for itself.
@@ -383,35 +399,40 @@ There are two invariants related to sponsoring-future-reserves:
account).
#### Sponsorship Invariants
+
There are three invariants related to sponsorships:
+
- (Global) The sum of all `numSponsoring` is equal to the sum of all
`numSponsored` plus the sum of all `ClaimableBalance.claimants.size()`.
- (Local) For an account `A`, `numSponsoring` is equal to the count of all
`LedgerEntry le` with `le.ext.v1().sponsoringID == A` weighted by the number
- of reserves required for that `LedgerEntry` (2 for `ACCOUNT`, `claimants.size()`
- for `CLAIMABLE_BALANCE`, 1 otherwise) plus the count of all `A` in
- `signerSponsoringIDs`.
+ of reserves required for that `LedgerEntry` (2 for `ACCOUNT`,
+ `claimants.size()` for `CLAIMABLE_BALANCE`, 1 otherwise) plus the count of
+ all `A` in `signerSponsoringIDs`.
- (Local) For an account `A`, `numSponsored` is equal to the count of all
- `LedgerEntry le` which are sub-entries of `A` (including `A` itself) that have
- `le.ext.v1().sponsoringID != null` weighted by the number of reserves
- required for that `LedgerEntry` (2 for `ACCOUNT`, 1 otherwise) plus the count of all `s`
- in `A.ext.v1().ext.v2().signerSponsoringIDs` with `s != null`. Note that
- Claimable balances are not sub-entries, so they do not affect `numSponsored`.
-
+ `LedgerEntry le` which are sub-entries of `A` (including `A` itself) that
+ have `le.ext.v1().sponsoringID != null` weighted by the number of reserves
+ required for that `LedgerEntry` (2 for `ACCOUNT`, 1 otherwise) plus the count
+ of all `s` in `A.ext.v1().ext.v2().signerSponsoringIDs` with `s != null`.
+ Note that Claimable balances are not sub-entries, so they do not affect
+ `numSponsored`.
#### BeginSponsoringFutureReservesOp
+
`BeginSponsoringFutureReservesOp` is the only operation that can initiate the
sponsoring-future-reserves relation. It is forbidden for A to be
sponsoring-future-reserves for B, and B to be sponsoring-future-reserves for C,
and this operation enforces this constraint.
To check validity of `BeginSponsoringFutureReservesOp op`:
+
```
If op.sponsoredID == op.sourceAccount
Invalid with BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED
```
The behavior of `BeginSponsoringFutureReservesOp op` is:
+
```
If an account is sponsoring future reserves for op.sponsoredID
Fail with BEGIN_SPONSORING_FUTURE_RESERVES_ALREADY_SPONSORED
@@ -428,6 +449,7 @@ Succeed with BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS
`BeginSponsoringFutureReservesOp` requires medium threshold.
#### EndSponsoringFutureReservesOp
+
`EndSponsoringFutureReservesOp` is the only operation that can terminate the
sponsoring-future-reserves relation. This can only be done if some account is
sponsoring-future-reserves for the source account.
@@ -435,6 +457,7 @@ sponsoring-future-reserves for the source account.
`EndSponsoringFutureReservesOp` is always valid.
The behavior of `EndSponsoringFutureReservesOp op` is:
+
```
If an account is not sponsoring future reserves for op.sourceAccount
Fail with END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED
@@ -446,6 +469,7 @@ Succeed with END_SPONSORING_FUTURE_RESERVES_SUCCESS
`EndSponsoringFutureReservesOp` requires medium threshold.
#### RevokeSponsorshipOp
+
`RevokeSponsorshipOp` allows a ledger entry or signer
- that is not sponsored to be sponsored,
@@ -453,6 +477,7 @@ Succeed with END_SPONSORING_FUTURE_RESERVES_SUCCESS
- that is sponsored to no longer be sponsored.
To check validity of `RevokeSponsorshipOp op`:
+
```
If ledgerVersion != 14
If op.type() == REVOKE_SPONSORSHIP_LEDGER_ENTRY
@@ -477,6 +502,7 @@ If ledgerVersion != 14
The behavior of `RevokeSponsorshipOp op` is as follows if
`op.type() == REVOKE_SPONSORSHIP_LEDGER_ENTRY`:
+
```
Load op.ledgerKey() as le
If le does not exist
@@ -548,6 +574,7 @@ Succeed with REVOKE_SPONSORSHIP_SUCCESS
The behavior of `RevokeSponsorshipOp op` is as follows if
`op.type() == REVOKE_SPONSORSHIP_SIGNER`:
+
```
Load op.signer().accountID as le
If le does not exist
@@ -627,6 +654,7 @@ Succeed with REVOKE_SPONSORSHIP_SUCCESS
`RevokeSponsorshipOp` requires medium threshold.
#### AccountMergeOp
+
`AccountMergeOp` will fail with `ACCOUNT_MERGE_IS_SPONSOR` if attempting to
merge an account that is-sponsoring-future-reserves-for another account. This
guarantees that the sponsoring account always exists.
@@ -636,21 +664,25 @@ attempting to merge an account that has non-zero `numSponsoring`. This is
required for sponsorship bookkeeping.
#### CreateAccountOp
+
`CreateAccountOp` will now be valid if `startingBalance >= 0`, whereas prior to
this proposal it was valid if `startingBalance > 0`.
#### ManageSellOfferOp and ManageBuyOfferOp
-Starting in protocol version 15, `ManageSellOfferOp` and `ManageBuyOfferOp` will
-be invalid if `offerID < 0`.
+
+Starting in protocol version 15, `ManageSellOfferOp` and `ManageBuyOfferOp`
+will be invalid if `offerID < 0`.
#### Operations that can change numSubEntries
+
Any operation that can change `numSubEntries` can now fail with
-`opTOO_MANY_SPONSORING`, if any `numSponsoring` or
-`numSponsored` would exceed `UINT32_MAX`.
+`opTOO_MANY_SPONSORING`, if any `numSponsoring` or `numSponsored` would exceed
+`UINT32_MAX`.
## Design Rationale
### Sponsorship Logic is Off-Chain
+
In CAP-0031, an alternative approach to sponsorship, the logic for determining
what can be sponsored is stored on the ledger. Not only is this complicated to
implement and reason about, but it also introduces a variety of limitations in
@@ -659,6 +691,7 @@ the "sandwich approach", analogous to what is done in CAP-0018, eliminates all
of these disadvantages.
### Why Should Sponsorship be Ephemeral?
+
There are a variety of reasons that the sponsoring-future-reserves relation is
ephemeral (by which I mean contained within a single transaction). From a
practical perspective, it would be deeply unwise to delegate to another party
@@ -680,6 +713,7 @@ relation, and if you didn't want revocation to occur then you shouldn't have
accepted the sponsorship in the first place.
### Sponsoring Accounts Cannot be Merged
+
An account that is sponsoring-future-reserves for another account cannot be
merged. This does not constrain functionality at all, but simplifies the
implementation and reduces the number of possible errors that can be
@@ -694,6 +728,7 @@ be merged. If you want to merge such an account, then you can use
this restriction greatly simplifies the implementation and semantics.
### Sequence Number Utilization and Sponsoring Account Creation
+
A typical use case for sponsorship is an enterprise sponsoring the reserves for
customers. Because sponsorship requires signatures from both the sponsoring
account and the sponsored account, this requires multi-signature coordination.
@@ -705,35 +740,38 @@ number. This will likely require a pool of channel accounts, which is a source
of complexity.
Despite the complexity that will be required for large-scale creation of
-sponsored accounts, we still believe that this approach is justified by its many
-benefits. Furthermore, the need for channel accounts during multi-signature
-coordination is an area in which the Stellar protocol can be generally improved.
-Already there are discussions of ideas which may mitigate some of these issues
-such as David Mazieres' proposal for Authenticated Operations, which can be found
-at https://groups.google.com/forum/#!msg/stellar-dev/zpO0Eppn8ks/e1ULbV_lAgAJ.
+sponsored accounts, we still believe that this approach is justified by its
+many benefits. Furthermore, the need for channel accounts during
+multi-signature coordination is an area in which the Stellar protocol can be
+generally improved. Already there are discussions of ideas which may mitigate
+some of these issues such as David Mazieres' proposal for Authenticated
+Operations, which can be found at
+https://groups.google.com/forum/#!msg/stellar-dev/zpO0Eppn8ks/e1ULbV_lAgAJ.
### Why are Signer Sponsoring IDs Stored Separately from Signers?
-For ledger entries, the sponsoring ID is stored within the ledger entry. But for
-signers, the sponsoring ID is stored separately. Why take different approaches?
-The reason is that `LedgerEntry` has an extension point but `Signer` does not.
-So if we wanted to store the sponsoring ID in the signer, then we would need to
-extend the `SignerKey` union. But this would force any downstream system that
-interacts with signers to update as well, introducing complexity throughout the
-ecosystem. Storing the sponsoring IDs for signers separately avoids this problem
-entirely, but it is perhaps slightly less elegant. Still, the trade-off is more
-than justified.
+
+For ledger entries, the sponsoring ID is stored within the ledger entry. But
+for signers, the sponsoring ID is stored separately. Why take different
+approaches? The reason is that `LedgerEntry` has an extension point but
+`Signer` does not. So if we wanted to store the sponsoring ID in the signer,
+then we would need to extend the `SignerKey` union. But this would force any
+downstream system that interacts with signers to update as well, introducing
+complexity throughout the ecosystem. Storing the sponsoring IDs for signers
+separately avoids this problem entirely, but it is perhaps slightly less
+elegant. Still, the trade-off is more than justified.
### Why Doesn't RevokeSponsorshipOp Obey Typical Rules of Sponsorship?
+
The relationship between sponsoring account and sponsored account can only be
created by mutual agreement. When this relationship is established, the
sponsoring account is endowed with the right to unilaterally end the
-relationship (ie. revoke the sponsorship) as long as doing so will not leave the
-sponsored account below the reserve requirement. But revoking the sponsorship
-is strictly worse for the sponsored account than transferring the sponsorship,
-because the worst thing that the new sponsoring account can do is revoke the
-sponsorship. Therefore, the sponsoring account should be able to transfer the
-sponsorship with the consent of the new sponsoring account but unilaterally with
-respect to the sponsored account.
+relationship (ie. revoke the sponsorship) as long as doing so will not leave
+the sponsored account below the reserve requirement. But revoking the
+sponsorship is strictly worse for the sponsored account than transferring the
+sponsorship, because the worst thing that the new sponsoring account can do is
+revoke the sponsorship. Therefore, the sponsoring account should be able to
+transfer the sponsorship with the consent of the new sponsoring account but
+unilaterally with respect to the sponsored account.
So how would a transfer work if `RevokeSponsorshipOp` were to obey the typical
rules of sponsorship? Let `S1` be the current sponsor, `S2` be the new sponsor,
@@ -752,11 +790,13 @@ This also resolves an issue for `ClaimableBalanceEntry`, because there is no
notion of an account that controls a `ClaimableBalanceEntry`.
### There is no opTOO_MANY_SPONSORED
-It initially seems strange that there is no `opTOO_MANY_SPONSORED` in analogy to
-`opTOO_MANY_SPONSORING`. But this case is already covered by the constraint that
-`numSponsored <= numSubEntries + 2 < UINT32_MAX`.
+
+It initially seems strange that there is no `opTOO_MANY_SPONSORED` in analogy
+to `opTOO_MANY_SPONSORING`. But this case is already covered by the constraint
+that `numSponsored <= numSubEntries + 2 < UINT32_MAX`.
### Example: Sponsoring Account Creation
+
In this example, we demonstrate how an account can be sponsored upon creation.
Let `S` be the sponsoring account, `C` be the creating account, and `A` the
newly created account (`S` and `C` may be the same account). Then the following
@@ -786,11 +826,12 @@ operations[2]:
type: END_SPONSORING_FUTURE_RESERVES
```
-where ``, ``, ``, and `` should all
-be substituted appropriately. Note that this requires a signature from `A` even
-though that account is being created.
+where ``, ``, ``, and `` should
+all be substituted appropriately. Note that this requires a signature from `A`
+even though that account is being created.
### Example: Two Trust Lines with Different Sponsors
+
In this example, we demonstrate how a single account can create two trust lines
which are sponsored by different accounts in a single transaction. Let `S1` and
`S2` be the sponsoring accounts. Let `A` be the sponsored account. Then the
@@ -839,10 +880,11 @@ where ``, ``, and `` should all be substituted
appropriately.
### Example: Revoke Sponsorship
-In this example, we demonstrate how a sponsorship can be revoked. Let `S`
-be the sponsoring account. Let `K` be the `LedgerKey` for an entry which
-is currently sponsored by `S`. Then the following transaction achieves the
-desired goal:
+
+In this example, we demonstrate how a sponsorship can be revoked. Let `S` be
+the sponsoring account. Let `K` be the `LedgerKey` for an entry which is
+currently sponsored by `S`. Then the following transaction achieves the desired
+goal:
```
sourceAccount: S
@@ -862,6 +904,7 @@ where ``, ``, and `` should all be substituted
appropriately.
### Example: Transfer Sponsorship
+
In this example, we demonstrate how a sponsorship can be transferred. Let `S1`
and `S2` be the sponsoring accounts. Let `K` be the `LedgerKey` for an entry
which is currently sponsored by `S1`. Then the following transaction achieves
@@ -896,22 +939,27 @@ appropriately.
## Backwards Incompatibilities
### New XDR
+
All downstream systems will need updated XDR in order to recognize the new
operations and the modified ledger entries.
### Operation Validity Changes
+
Any downstream system relying on any of the following facts will be affected:
- `CreateAccountOp` is invalid if `startingBalance = 0`
- Manage offer operations can be valid if `offerID < 0`
## Security Concerns
-This proposal will slightly reduce the efficacy of base reserve changes, because
-sponsored ledger entries cannot cause an account to pass below the reserve
-requirement.
+
+This proposal will slightly reduce the efficacy of base reserve changes,
+because sponsored ledger entries cannot cause an account to pass below the
+reserve requirement.
## Test Cases
+
None yet.
## Implementation
+
None yet.
diff --git a/core/cap-0034.md b/core/cap-0034.md
index 77de352a2..21ae5bfd2 100644
--- a/core/cap-0034.md
+++ b/core/cap-0034.md
@@ -26,8 +26,8 @@ that smart contracts can not, with the current protocol, safely be written as
chains of dependent transactions with incrementing sequence numbers, because
there is no way for a client to guarantee that a transaction will succeed if it
consumes a sequence number. There is always a possibility that it will return
-`txTOO_LATE`. But returning `txTOO_LATE` while consuming a sequence number
-(and being charged a fee) violates our
+`txTOO_LATE`. But returning `txTOO_LATE` while consuming a sequence number (and
+being charged a fee) violates our
[API documentation](https://www.stellar.org/developers/guides/concepts/transactions.html),
which states that "If the transaction is invalid, it will be immediately
rejected by stellar-core based on the validity rules of a transaction, the
@@ -35,10 +35,10 @@ account’s sequence number will not be incremented, and no fee will be consumed
from the source account", where the referenced
["validity rules of a transaction"](https://www.stellar.org/developers/guides/concepts/transactions.html#validity-of-a-transaction)
include "The transaction must be submitted within the set time bounds of the
-transaction, otherwise it will be considered invalid".
-There may therefore _be_ smart contracts which are structured this way, and are
-allowed to be structured this way according to our published API, yet are
-vulnerable to inconsistencies because of this race.
+transaction, otherwise it will be considered invalid". There may therefore _be_
+smart contracts which are structured this way, and are allowed to be structured
+this way according to our published API, yet are vulnerable to inconsistencies
+because of this race.
A less catastrophic problem, but an inefficiency, is that externalizing
transaction sets that contain significant numbers of transactions that are
@@ -63,15 +63,15 @@ desirable effects on both good-faith transactions and bad-faith (spam)
transactions:
- Good-faith transactions should not be charged a fee without being included in
-a ledger, and should certainly not be subject to potential inconsistencies when
-part of smart contracts that depend upon sequence numbers to manage
-dependencies.
+ a ledger, and should certainly not be subject to potential inconsistencies
+ when part of smart contracts that depend upon sequence numbers to manage
+ dependencies.
- Bad-faith transactions should not be allowed into fill up ledgers, starving
-out good-faith transactions. This CAP ensures that consensus gets the
-opportunity to select transaction sets from among transactions that would be
-guaranteed not to return `txTOO_LATE` if they were externalized, so ledgers
-would only be filled by transactions that could actually be applied.
+ out good-faith transactions. This CAP ensures that consensus gets the
+ opportunity to select transaction sets from among transactions that would be
+ guaranteed not to return `txTOO_LATE` if they were externalized, so ledgers
+ would only be filled by transactions that could actually be applied.
### Goals Alignment
@@ -89,22 +89,22 @@ It also aligns with the following Stellar Network Value:
Currently, the nomination protocol produces, from a set of candidate
`StellarValue`s, a nominated `StellarValue` comprising the composited
-transaction set (as selected by a deterministic heuristic which chooses
-one particular input set and favors larger sets), the maximum `closeTime` of any
+transaction set (as selected by a deterministic heuristic which chooses one
+particular input set and favors larger sets), the maximum `closeTime` of any
candidate, and a set of "maximal" ledger `upgrades` from all candidates.
We propose to change the protocol to take the `closeTime` from the same
candidate `StellarValue` as the chosen transaction set (the heuristic which
decides which transaction set to choose does not change; nor does the method of
-generating `upgrades`). The protocol would then validate all transaction sets
+generating `upgrades`). The protocol would then validate all transaction sets
against their associated `closeTime`s, and could use `SIGNED` `StellarValue`s
throughout the ballot protocol as well as the nomination protocol.
The effect is that all trimming and validating of transaction sets during the
nomination and ballot protocols use the exact same conditions as the validation
-of transactions does during ledger close. (Today, they differ: the nomination
+of transactions does during ledger close. (Today, they differ: the nomination
and ballot protocols use the last ledger close time, whereas ledger close uses
-the new ledger close time. This CAP allows this precise alignment because it
+the new ledger close time. This CAP allows this precise alignment because it
allows the nomination and ballot protocols to predict exactly what the next
ledger close time will be _if_ the transaction set that they are validating is
ultimately externalized.)
@@ -113,7 +113,7 @@ In particular, this affects how soon the core notices when transactions expire.
## Specification
-There are no changes to any XDR in this CAP. The treatment of the `closeTime`
+There are no changes to any XDR in this CAP. The treatment of the `closeTime`
in the `StellarValue` XDR in some code paths changes, as does the use of
`SIGNED` `StellarValue`s in some places where `BASIC` ones are currently used,
but the XDR itself does not change.
@@ -121,43 +121,43 @@ but the XDR itself does not change.
This CAP proposes to change the compositing function, which chooses a
`StellarValue` to start the ballot protocol on, to remove the current combining
of closetimes and simply select the one from the same `StellarValue` as the
-selected transaction set. (This CAP proposes to preserve the existing selection
+selected transaction set. (This CAP proposes to preserve the existing selection
heuristic for "composited transaction set" unchanged.)
This CAP also proposes to trim and validate transaction sets in `StellarValue`s
(in all places where such trimming or validating is currently done) against the
-`closeTime` in the same `StellarValue`. The `closeTime` affects whether a
+`closeTime` in the same `StellarValue`. The `closeTime` affects whether a
transaction with an upper time bound (`maxTime`) returns `txTOO_LATE`, and
-whether a transaction with a lower time bound (`minTime`) returns `txTOO_EARLY`.
-Those are therefore the two transaction-validity tests which are affected by
-the change in which `closeTime` is used to validate transactions.
+whether a transaction with a lower time bound (`minTime`) returns
+`txTOO_EARLY`. Those are therefore the two transaction-validity tests which are
+affected by the change in which `closeTime` is used to validate transactions.
-This CAP also proposes to make the nomination protocol produce a signed
-value (currently we nominate an unsigned, or "basic", value, because, as a
+This CAP also proposes to make the nomination protocol produce a signed value
+(currently we nominate an unsigned, or "basic", value, because, as a
combination of candidate values, it may not be equal to any one candidate value
for which we have a signature). It does not propose any specific use of that
-signature yet, but preserving the signature as part of the same protocol
-change that allows it to be preserved will allow us to use it in the future if
-we discover a way to do so.
+signature yet, but preserving the signature as part of the same protocol change
+that allows it to be preserved will allow us to use it in the future if we
+discover a way to do so.
-As with any protocol change, the new code must remember and maintain
-older protocols' behavior, and continue to use old behavior until after
-the network externalizes the ledger upgrade to the new protocol, through
-consensus.
+As with any protocol change, the new code must remember and maintain older
+protocols' behavior, and continue to use old behavior until after the network
+externalizes the ledger upgrade to the new protocol, through consensus.
The changes in this CAP build upon changes already made (without a CAP, since
they were not protocol changes, but changes to the heuristics that a node uses
to decide which transactions to flood and which to include in its first
nominated transaction set in a new ledger) in
-[PR 2608](https://github.com/stellar/stellar-core/pull/2608). Those changes
-already reduce the potential network burden of flooding transactions that appear
-likely to expire before getting into transaction sets, and they introduce some
-interfaces into the code which the code for this CAP can naturally re-use.
+[PR 2608](https://github.com/stellar/stellar-core/pull/2608). Those changes
+already reduce the potential network burden of flooding transactions that
+appear likely to expire before getting into transaction sets, and they
+introduce some interfaces into the code which the code for this CAP can
+naturally re-use.
-Therefore, we present the semantics in three stages: the behavior as released,
+Therefore, we present the semantics in three stages: the behavior as released,
the latest unreleased behavior (which contains heuristic but no protocol
changes, and therefore involved no CAP), and the behavior as proposed in this
-CAP. **Bold** text indicates a semantic change from the previous stage. The
+CAP. **Bold** text indicates a semantic change from the previous stage. The
proposal in this CAP embodies the **bold** semantics in the last column only;
the **bold** semantics in the middle column are for context.
@@ -179,156 +179,161 @@ set of candidates, and all of those questions must be answered by the Core
through consensus.
1. The proposal prevents the race that motivated the CAP; it ensures in all
-cases that a transaction that returns `txTOO_LATE` does not consume a sequence
-number and does not pay a fee. This is ensured because before a transaction
-set gets to the point of having its transaction fees paid (which happens when a
-ledger is closing), we composite the candidate `` pairs into
-one, and with the CAP behavior we do so by simply selecting one candidate.
-And every candidate has by the time of compositing been validated, including
-against the new condition that it contains no transactions that are expired
-with respect to its `closeTime`.
+ cases that a transaction that returns `txTOO_LATE` does not consume a
+ sequence number and does not pay a fee. This is ensured because before a
+ transaction set gets to the point of having its transaction fees paid (which
+ happens when a ledger is closing), we composite the candidate
+ `` pairs into one, and with the CAP behavior we do so by
+ simply selecting one candidate. And every candidate has by the time of
+ compositing been validated, including against the new condition that it
+ contains no transactions that are expired with respect to its `closeTime`.
2. The proposal prevents an ill-behaved validator from intentionally triggering
-the race that motivated the CAP through what we shall call "maximum `closeTime`
-injection": the ill-behaved validator could previously try to force transactions
-in transaction sets proposed by other nodes to fail during ledger close by
-proposing as large a `closeTime` as allowed (the maximum time slip is currently
-one minute).
+ the race that motivated the CAP through what we shall call "maximum
+ `closeTime` injection": the ill-behaved validator could previously try to
+ force transactions in transaction sets proposed by other nodes to fail
+ during ledger close by proposing as large a `closeTime` as allowed (the
+ maximum time slip is currently one minute).
3. This proposal refines
-[PR 2608](https://github.com/stellar/stellar-core/pull/2608)'s
-trimming of transactions prior to initial nomination by changing the
-next-ledger-`closeTime` estimate (which PR2608 itself had changed from
-last-ledger-`closeTime`) to `closeTime`-within-`StellarValue`. Given this CAP's
-semantics, we know at this point that that `closeTime` is the _exact_ one that
-will be chosen as the ledger close time _if_ the transaction set is ultimately
-externalized, so we can trim precisely the most efficient set of transactions:
-we trim all of those which would have later returned `txTOO_LATE` if the
-transaction set were ultimately externalized, and we are guaranteed that none of
-those which survives trimming will return `txTOO_LATE` if the transaction set is
-ultimately externalized.
-
-4. This CAP in general makes changes to transaction set composition and validity
-in the earliest protocol phases possible, making use of knowledge as soon as we
-have it. The previous point is an example -- we trim a transaction set prior to
-first nomination precisely when we decide on which `closeTime` to nominate
-in the same `StellarValue` with it, which also tells us precisely which
-transactions we may as well trim because they'll be expired at ledger close time
-if that `StellarValue` is ultimately externalized. The change in the validation
-of transaction sets in the nomination and ballot protocols is a similar example;
-we can do more accurate validation than we could before this CAP, when all we
-could validate against was the last ledger close time.
+ [PR 2608](https://github.com/stellar/stellar-core/pull/2608)'s trimming of
+ transactions prior to initial nomination by changing the
+ next-ledger-`closeTime` estimate (which PR2608 itself had changed from
+ last-ledger-`closeTime`) to `closeTime`-within-`StellarValue`. Given this
+ CAP's semantics, we know at this point that that `closeTime` is the _exact_
+ one that will be chosen as the ledger close time _if_ the transaction set is
+ ultimately externalized, so we can trim precisely the most efficient set of
+ transactions: we trim all of those which would have later returned
+ `txTOO_LATE` if the transaction set were ultimately externalized, and we are
+ guaranteed that none of those which survives trimming will return
+ `txTOO_LATE` if the transaction set is ultimately externalized.
+
+4. This CAP in general makes changes to transaction set composition and
+ validity in the earliest protocol phases possible, making use of knowledge
+ as soon as we have it. The previous point is an example -- we trim a
+ transaction set prior to first nomination precisely when we decide on which
+ `closeTime` to nominate in the same `StellarValue` with it, which also tells
+ us precisely which transactions we may as well trim because they'll be
+ expired at ledger close time if that `StellarValue` is ultimately
+ externalized. The change in the validation of transaction sets in the
+ nomination and ballot protocols is a similar example; we can do more
+ accurate validation than we could before this CAP, when all we could
+ validate against was the last ledger close time.
5. This CAP makes the `closeTime`-related transaction validity checks (the
-tests for `txTOO_EARLY` and `txTOO_LATE`) throughout consensus exactly
-equivalent to those used during ledger close. Previously, the tests used during
-consensus (which this CAP changes to align with those used during ledger close)
-were weaker than the tests used during ledger close with respect to
-`maxTime`/`txTOO_LATE` (the consensus checks could admit transactions which,
-if they were ultimately externalized, ledger close would later reject with
-`txTOO_LATE`), and stronger than the tests used during ledger close with respect
-to `minTime`/`txTOO_EARLY` (the consensus checks could reject transactions with
-`txTOO_EARLY` which, had they been allowed and ultimately externalized, would
-_not_ have been rejected with `txTOO_EARLY` during ledger close).
+ tests for `txTOO_EARLY` and `txTOO_LATE`) throughout consensus exactly
+ equivalent to those used during ledger close. Previously, the tests used
+ during consensus (which this CAP changes to align with those used during
+ ledger close) were weaker than the tests used during ledger close with
+ respect to `maxTime`/`txTOO_LATE` (the consensus checks could admit
+ transactions which, if they were ultimately externalized, ledger close would
+ later reject with `txTOO_LATE`), and stronger than the tests used during
+ ledger close with respect to `minTime`/`txTOO_EARLY` (the consensus checks
+ could reject transactions with `txTOO_EARLY` which, had they been allowed
+ and ultimately externalized, would _not_ have been rejected with
+ `txTOO_EARLY` during ledger close).
6. This CAP, in both the change it proposes to protocol semantics and the
-changes it would induce in the code structure, facilitates the option to
-perform a follow-on change to the work done in PR 2608 which would further
-refine our heuristic for choosing which transactions to flood by allowing
-some transactions to be flooded which would be `txTOO_EARLY` if the next ledger
-were to close immediately, but which we estimate will no longer be `txTOO_EARLY`
-by the time the next ledger actually does close. (That would not introduce any
-race analogous to the one that this CAP fixes with respect to `txTOO_LATE`,
-because the nomination and ballot protocols would still discover that a
-transaction would be invalid with `txTOO_EARLY` before it reached the point of
-being in a closing ledger and therefore failing validation while nevertheless
-consuming a sequence number and being charged a fee.)
+ changes it would induce in the code structure, facilitates the option to
+ perform a follow-on change to the work done in PR 2608 which would further
+ refine our heuristic for choosing which transactions to flood by allowing
+ some transactions to be flooded which would be `txTOO_EARLY` if the next
+ ledger were to close immediately, but which we estimate will no longer be
+ `txTOO_EARLY` by the time the next ledger actually does close. (That would
+ not introduce any race analogous to the one that this CAP fixes with respect
+ to `txTOO_LATE`, because the nomination and ballot protocols would still
+ discover that a transaction would be invalid with `txTOO_EARLY` before it
+ reached the point of being in a closing ledger and therefore failing
+ validation while nevertheless consuming a sequence number and being charged
+ a fee.)
7. This CAP has had a modest "test" in that it is the third proposal that we
-have considered, and appears not to suffer from any of the weaknesses which led
-us to reject the first two; see "Rejected alternatives" below for details.
+ have considered, and appears not to suffer from any of the weaknesses which
+ led us to reject the first two; see "Rejected alternatives" below for
+ details.
### Detailed illustration of failure mode with current protocol
With the current protocol, the following sequence of events can occur:
1. A smart-contract client submits a series of transactions `T1`, `T2` with
-`T2`'s sequence number one greater than `T1`'s. `T2` is intended have its
-operations applied if and only if `T1`'s are applied first; the smart contract
-is depending upon `T2` having the wrong sequence number if `T1`'s operations
-are not applied (in which case the smart contract expects that `T1` will not
-have consumed a sequence number). The smart contract sets an expiration time on
-`T1` (as our
-[documentation](https://www.stellar.org/developers/guides/concepts/transactions.html)
-"highly advises" that clients do).
+ `T2`'s sequence number one greater than `T1`'s. `T2` is intended to have its
+ operations applied if and only if `T1`'s are applied first; the smart
+ contract is depending upon `T2` having the wrong sequence number if `T1`'s
+ operations are not applied (in which case the smart contract expects that
+ `T1` will not have consumed a sequence number). The smart contract sets an
+ expiration time on `T1` (as our
+ [documentation](https://www.stellar.org/developers/guides/concepts/transactions.html)
+ "highly advises" that clients do).
2. The ledger happens first to have capacity to accept `T1` into a transaction
-set between `T1`'s expiration time and the time at which it will turn out (of
-course the network can not foresee that this will eventually be the case) that
-the next ledger will close.
+ set between `T1`'s expiration time and the time at which it will turn out
+ (of course the network can not foresee that this will eventually be the
+ case) that the next ledger will close.
3. `T1` happens at that point to be in the transaction queue of the validator
-who (though again this is not predictable yet) will turn out to nominate the
-`StellarValue` that will ultimately be externalized. That node builds a
-transaction set that contains `T1`.
+ who (though again this is not predictable yet) will turn out to nominate the
+ `StellarValue` that will ultimately be externalized. That node builds a
+ transaction set that contains `T1`.
4. The transaction set containing `T1` is externalized, but (at least) one of
-the candidate `closeTime`s is greater than `T1`'s expiration time.
+ the candidate `closeTime`s is greater than `T1`'s expiration time.
5. Transactions are applied using the maximum of all candidate `closeTime`s as
-the new last ledger close time. `T1` therefore returns `txTOO_LATE`, and its
-operations are not applied. However, as it reached ledger close before being
-failed, it consumes a sequence number (and is charged a fee).
+ the new last ledger close time. `T1` therefore returns `txTOO_LATE`, and its
+ operations are not applied. However, as it reached ledger close before being
+ failed, it consumes a sequence number (and is charged a fee).
6. `T2` gets into an externalized transaction set (possibly, but not
-necessarily, the same one that `T1` got into) before it too expires. It
-has the correct sequence number, because `T1` consumed a sequence number.
-`T2`'s operations are therefore applied -- even though its preconditions were
-intended to include guarantees that `T1`'s operations had ensured, so the
-actual postconditions of `T2` could violate any of the smart contract's intended
-invariants which had depended upon `T1`'s operations having succeeded before
-`T2`'s operations could be applied.
+ necessarily, the same one that `T1` got into) before it too expires. It has
+ the correct sequence number, because `T1` consumed a sequence number. `T2`'s
+ operations are therefore applied -- even though its preconditions were
+ intended to include guarantees that `T1`'s operations had ensured, so the
+ actual postconditions of `T2` could violate any of the smart contract's
+ intended invariants which had depended upon `T1`'s operations having
+ succeeded before `T2`'s operations could be applied.
So `T` consumes a sequence number (and is charged a fee) but is never applied.
There is a design pattern in some smart contracts which breaks, allowing
-inconsistent transactions to be committed, if it encounters this race: a smart
+inconsistent transactions to be committed, if it encounters this race: a smart
contract might submit a chain of transactions, with incrementing sequence
-numbers, each intended to be applied only if the previous ones succeeded. If
-there were transactions that were intended to be constrained by the incrementing
-sequence numbers only to be applied if `T` succeeded, they could do so because
-`T` had consumed its sequence number, even though it was never applied. The
-dependent transactions would then perform operations which the smart contract
-had intended to be performed only if `T` had succeeded. This would
-potentially be arbitrarily bad for the smart contract.
-
-With the behavior proposed in this CAP, step #3 -- a node building a transaction
-set containing `T1` -- would only occur if `T1` were not expired with respect to
-the `closeTime` in the same proposed `StellarValue` as that transaction set.
-Otherwise, `T1` would not make it into a transaction set, and would therefore
-never consume a sequence number (or be charged a fee), and `T2` would fail
-validation with a bad sequence number, as the smart contract intended in that
-case. If `T1` did make it into a transaction set, then, under the new behavior
-in this CAP, step #5 would change -- if the transaction set containing `T1` won
-nomination, then the `closeTime` of the new ledger would be the `closeTime` from
-the same `StellarValue` as `T1`, with respect to which `T1` is in this case not
-expired. Hence, `T1` would be applied during ledger close; it would not return
-`txTOO_LATE`. `T2` would therefore have the opportunity to be applied (assuming
-it was valid in the other respects in addition to its sequence number), and in
-this case that would be as expected, as `T1` had previously succeeded.
+numbers, each intended to be applied only if the previous ones succeeded. If
+there were transactions that were intended to be constrained by the
+incrementing sequence numbers only to be applied if `T` succeeded, they could
+do so because `T` had consumed its sequence number, even though it was never
+applied. The dependent transactions would then perform operations which the
+smart contract had intended to be performed only if `T` had succeeded. This
+would potentially be arbitrarily bad for the smart contract.
+
+With the behavior proposed in this CAP, step #3 -- a node building a
+transaction set containing `T1` -- would only occur if `T1` were not expired
+with respect to the `closeTime` in the same proposed `StellarValue` as that
+transaction set. Otherwise, `T1` would not make it into a transaction set, and
+would therefore never consume a sequence number (or be charged a fee), and `T2`
+would fail validation with a bad sequence number, as the smart contract
+intended in that case. If `T1` did make it into a transaction set, then, under
+the new behavior in this CAP, step #5 would change -- if the transaction set
+containing `T1` won nomination, then the `closeTime` of the new ledger would be
+the `closeTime` from the same `StellarValue` as `T1`, with respect to which
+`T1` is in this case not expired. Hence, `T1` would be applied during ledger
+close; it would not return `txTOO_LATE`. `T2` would therefore have the
+opportunity to be applied (assuming it was valid in the other respects in
+addition to its sequence number), and in this case that would be as expected,
+as `T1` had previously succeeded.
Note that the duration that this race window in the current protocol remains
-open can be lengthened (up to one minute) by the increase of any _one_ candidate
-`closeTime`, since the current protocol combines candidate `closeTime`s by
-choosing the maximum. That means in particular that a single bad _validator_
-can open this race window significantly, with the consequences including both
-the failure of more transactions with `txTOO_LATE` and the potential for
-inconsistent smart contract behavior. This reflects that the current
-protocol's choice of the maximum candidate `closeTime` is, in a sense, too
-sensitive: it can externalize a value that could have been manipulated by a
-single bad actor. We refer to such manipulation as "maximum `closeTime`
+open can be lengthened (up to one minute) by the increase of any _one_
+candidate `closeTime`, since the current protocol combines candidate
+`closeTime`s by choosing the maximum. That means in particular that a single
+bad _validator_ can open this race window significantly, with the consequences
+including both the failure of more transactions with `txTOO_LATE` and the
+potential for inconsistent smart contract behavior. This reflects that the
+current protocol's choice of the maximum candidate `closeTime` is, in a sense,
+too sensitive: it can externalize a value that could have been manipulated by a
+single bad actor. We refer to such manipulation as "maximum `closeTime`
injection". The proposal in this CAP is less sensitive: a node can affect the
composite `closeTime` only if it manages to provide a transaction set that the
-network as a whole selects as a composited one for the ballot protocol. We
+network as a whole selects as a composited one for the ballot protocol. We
force a node to do a "good deed" (putting together a transaction set which the
compositing function selects over any other node's candidate) for the network's
clients in order to influence the eventual externalized `closeTime`. In the
@@ -340,131 +345,132 @@ the validity of transactions in transaction sets proposed by other nodes.
### Detailed argument in favor of this proposal
-Here we argue that this CAP would represent a clear improvement in behavior
-by arguing individually for each of the semantic changes in the table above, in
-the order presented (we shall use the "index" column for reference), so that the
-desirability of the first change depends only on the state of the current
+Here we argue that this CAP would represent a clear improvement in behavior by
+arguing individually for each of the semantic changes in the table above, in
+the order presented (we shall use the "index" column for reference), so that
+the desirability of the first change depends only on the state of the current
protocol and code, and the desirability of each further change may depend upon
that of earlier changes as well, in effect assuming that they have been made
because they are desirable (thus constructing an inductive argument for the
desirability of the whole CAP).
-1. The first listed change was one of the optimizations made in PR 2608,
-without requiring a protocol change (or, therefore, CAP), so we do not need to
-argue for it in this CAP, but we explain it for context: by estimating when
-the next ledger will close and avoiding flooding any transactions which will
-have expired by that time, we can save the network and memory resources that we
-might have spent on behalf of transactions which were unlikely ever to be
-applied anyway. This CAP could be seen in part as propagating a similar style
-of change to some later stages in the protocol; those changes _do_ require a
-protocol upgrade.
-
-2. The second listed decision point was changed in PR 2608, and we propose to
-refine it further in this CAP. The PR 2608 change was similar to the one made
-in the first decision point 1, at a different place: when we trim transactions
-before choosing which `StellarValue` to nominate first when triggering a new
-ledger. The change had the same idea as the aforementioned one, to minimize the
-use of resources on behalf of likely-useless transactions. In this CAP, we
-propose changing this decision point further to do the trimming based on the
-exact `closeTime` that we are nominating. Since the CAP behavior would allow us
-to know that that would be the exact externalized `closeTime` if the transaction
-set that we nominate were to be externalized, we know that that `closeTime` is
-optimal to use for trimming against. Knowing the optimal value, we no longer
-need an estimate. (The first decision point does not change because at that
-point we do not yet know the rest of the `StellarValue` that we will end up
-nominating -- in particular we do not yet know the `closeTime` -- so an estimate
-is the best that we can do.) And because that value is optimal -- all the
-transactions that it leads to being trimmed would have been guaranteed to have
-returned `txTOO_EARLY`/`txTOO_LATE` if the transaction set had been
-externalized, and all the transactions that it leads to _not_ being trimmed are
-guaranteed _not_ to return `txTOO_EARLY`/`txTOO_LATE` if the transaction set is
-ultimately externalized -- it is clearly desirable to use it.
-
-3. In the presence of change #2, which trims any premature/expired transactions
-before nominating a `StellarValue`, it is only an ill-behaved node that would
-nominate a `StellarValue` containing a transaction which is premature/expired
-with respect to its own `closeTime`, so it becomes sensible for such a value to
-fail validation. There is no loss in this, since we would not want to accept a
-`StellarValue` from an ill-behaved node in any case; this change (given change
-\#2) simply represents a strict improvement in our ability to detect a particular
-incorrect behavior.
-
-4. In the presence of change #3, any candidate transaction set passed in to
-the compositing function consists only of transactions which are valid
-with respect to the `closeTime` in the same `StellarValue` as the transaction
-set. Therefore, once the compositing function has chosen one of the candidate
-transaction sets, the choice of a composited `closeTime` equal to the
-`closeTime` in the same `StellarValue` as the chosen transaction set becomes
-optimal in the following sense: it is precisely the latest `closeTime` with
-respect to which all of the transactions in the chosen transaction set are
-valid (unexpired). By comparison, the old protocol's choice of the maximal
-`closeTime` of all candidates' is simply a loss:
-
- - It might render some of the transactions in the composited set expired,
- which is the `txTOO_LATE` race that motivated this CAP.
-
- - It might render some transactions still in the memory pools of some nodes
- expired, when they might have had a chance of getting into a future ledger
- if the `closeTime` had not been needlessly increased.
-
- - It makes the ledger close time more sensitive to the choice of a large
- `closeTime` by a single bad actor, which we have called the "maximum
- `closeTime` injection" problem, and discussed above in the "Detailed
- illustration of failure mode".
-
-5. Given the ability to preserve the signature (since it is on a nominated
-`` pair, of which we now choose a specific one rather than
-combining multiple ones in producing the `closeTime`) in the output of the
-compositing function, there is little cost to doing so (we have the signature
-in memory and have already checked it), so we may as well lose as little
-information as possible. And given that we have preserved the signature on
-`StellarValue`s throughout the nomination protocol and into the ballot protocol,
-we must expect `SIGNED` `StellarValue`s in the ballot protocol, and we must
-check the signatures again in the ballot protocol, to make sure that the
-additional information that we are preserving is _correct_ information. The
-additional signature-checking, however, will be on a signature that we just
-checked during the preceding nomination round, so we will likely simply hit in
-cache nearly all the time, and not have to do significantly more signature
-checks.
-
-Therefore, we argue that each successive change represents a clear
-improvement, given the previous one (and the first represents a clear
-improvement over the existing implementation).
+1. The first listed change was one of the optimizations made in PR 2608,
+ without requiring a protocol change (or, therefore, CAP), so we do not need
+ to argue for it in this CAP, but we explain it for context: by estimating
+ when the next ledger will close and avoiding flooding any transactions
+ which will have expired by that time, we can save the network and memory
+ resources that we might have spent on behalf of transactions which were
+ unlikely ever to be applied anyway. This CAP could be seen in part as
+ propagating a similar style of change to some later stages in the protocol;
+ those changes _do_ require a protocol upgrade.
+
+2. The second listed decision point was changed in PR 2608, and we propose to
+ refine it further in this CAP. The PR 2608 change was similar to the one
+ made in the first decision point 1, at a different place: when we trim
+ transactions before choosing which `StellarValue` to nominate first when
+ triggering a new ledger. The change had the same idea as the aforementioned
+ one, to minimize the use of resources on behalf of likely-useless
+ transactions. In this CAP, we propose changing this decision point further
+ to do the trimming based on the exact `closeTime` that we are nominating.
+ Since the CAP behavior would allow us to know that that would be the exact
+ externalized `closeTime` if the transaction set that we nominate were to be
+ externalized, we know that that `closeTime` is optimal to use for trimming
+ against. Knowing the optimal value, we no longer need an estimate. (The
+ first decision point does not change because at that point we do not yet
+ know the rest of the `StellarValue` that we will end up nominating -- in
+ particular we do not yet know the `closeTime` -- so an estimate is the best
+ that we can do.) And because that value is optimal -- all the transactions
+ that it leads to being trimmed would have been guaranteed to have returned
+ `txTOO_EARLY`/`txTOO_LATE` if the transaction set had been externalized,
+ and all the transactions that it leads to _not_ being trimmed are
+ guaranteed _not_ to return `txTOO_EARLY`/`txTOO_LATE` if the transaction
+ set is ultimately externalized -- it is clearly desirable to use it.
+
+3. In the presence of change #2, which trims any premature/expired
+ transactions before nominating a `StellarValue`, it is only an ill-behaved
+ node that would nominate a `StellarValue` containing a transaction which is
+ premature/expired with respect to its own `closeTime`, so it becomes
+ sensible for such a value to fail validation. There is no loss in this,
+ since we would not want to accept a `StellarValue` from an ill-behaved node
+ in any case; this change (given change \#2) simply represents a strict
+ improvement in our ability to detect a particular incorrect behavior.
+
+4. In the presence of change #3, any candidate transaction set passed in to
+ the compositing function consists only of transactions which are valid with
+ respect to the `closeTime` in the same `StellarValue` as the transaction
+ set. Therefore, once the compositing function has chosen one of the
+ candidate transaction sets, the choice of a composited `closeTime` equal to
+ the `closeTime` in the same `StellarValue` as the chosen transaction set
+ becomes optimal in the following sense: it is precisely the latest
+ `closeTime` with respect to which all of the transactions in the chosen
+ transaction set are valid (unexpired). By comparison, the old protocol's
+ choice of the maximal `closeTime` of all candidates is simply a loss:
+
+ - It might render some of the transactions in the composited set expired,
+ which is the `txTOO_LATE` race that motivated this CAP.
+
+ - It might render some transactions still in the memory pools of some nodes
+ expired, when they might have had a chance of getting into a future ledger
+ if the `closeTime` had not been needlessly increased.
+
+ - It makes the ledger close time more sensitive to the choice of a large
+ `closeTime` by a single bad actor, which we have called the "maximum
+ `closeTime` injection" problem, and discussed above in the "Detailed
+ illustration of failure mode".
+
+5. Given the ability to preserve the signature (since it is on a nominated
+ `<txSet, closeTime>` pair, of which we now choose a specific one rather than
+ combining multiple ones in producing the `closeTime`) in the output of the
+ compositing function, there is little cost to doing so (we have the
+ signature in memory and have already checked it), so we may as well lose as
+ little information as possible. And given that we have preserved the
+ signature on `StellarValue`s throughout the nomination protocol and into
+ the ballot protocol, we must expect `SIGNED` `StellarValue`s in the ballot
+ protocol, and we must check the signatures again in the ballot protocol, to
+ make sure that the additional information that we are preserving is
+ _correct_ information. The additional signature-checking, however, will be
+ on a signature that we just checked during the preceding nomination round,
+ so we will likely simply hit in cache nearly all the time, and not have to
+ do significantly more signature checks.
+
+Therefore, we argue that each successive change represents a clear improvement,
+given the previous one (and the first represents a clear improvement over the
+existing implementation).
### Rejected alternatives
The proposal in this CAP is the third that we have considered as a means of
-fixing the problems described in the "Motivation" section. We record them here
+fixing the problems described in the "Motivation" section. We record them here
to motivate the proposal that we later settled on for this CAP.
#### Deferring ledger time update until after applying transactions
The first idea was to change the point at which the current ledger header's
`closeTime` was updated from before transactions are applied to afterwards.
-(Ledger `upgrades` are already performed after transaction applies.) This
-would prevent an increase in the `closeTime` brought about by candidates other
-than the one whose transaction set was selected from causing transactions to
-fail (it would not occur until after they had been applied). However, this
-raised the concern that sometimes transactions would be applied despite having
+(Ledger `upgrades` are already performed after transaction applies.) This would
+prevent an increase in the `closeTime` brought about by candidates other than
+the one whose transaction set was selected from causing transactions to fail
+(it would not occur until after they had been applied). However, this raised
+the concern that sometimes transactions would be applied despite having
expiration times earlier than the close time of the ledger in which they were
-committed -- and there would not even be any bound on how large the gap
-between those times might be. This CAP maintains the invariant that if a
-transaction is applied, the close time of the ledger in which it is applied
-falls within the time bounds of the transaction.
+committed -- and there would not even be any bound on how large the gap between
+those times might be. This CAP maintains the invariant that if a transaction is
+applied, the close time of the ledger in which it is applied falls within the
+time bounds of the transaction.
#### Trimming transactions rendered expired by combining nominated candidates
The second idea was to remove transactions from the composited transaction set
-selected by the combine-candidates code in the nomination protocol, based on the
-(maximal) `closeTime` just selected by that code. However, that would seem to
-have opened up a new denial-of-service attack on the ledger:
-an attacker could create large numbers of transactions with very high fees,
-making it likely that they would be accepted into transaction sets, but with
-extremely short expiration times, that would almost certainly come before the
-next ledger close. They could therefore fill up the transaction sets with
-such transactions, yet avoid the transaction fees (the transactions would be
-trimmed by the new code before fees were charged), leaving the transaction
-sets produced by the nomination protocol with little or no room for legitimate
+selected by the combine-candidates code in the nomination protocol, based on
+the (maximal) `closeTime` just selected by that code. However, that would seem
+to have opened up a new denial-of-service attack on the ledger: an attacker
+could create large numbers of transactions with very high fees, making it
+likely that they would be accepted into transaction sets, but with extremely
+short expiration times, that would almost certainly come before the next ledger
+close. They could therefore fill up the transaction sets with such
+transactions, yet avoid the transaction fees (the transactions would be trimmed
+by the new code before fees were charged), leaving the transaction sets
+produced by the nomination protocol with little or no room for legitimate
transactions.
### Reflections on rejected alternatives
@@ -474,15 +480,15 @@ changes to transaction set composition and validity in the earliest protocol
phases possible", note that from the first rejected alternative, to the second
rejected alternative, to the proposal in this CAP, the phase of the protocol at
which we have considered placing the new trimming and validating code has moved
-earlier at each step. The "detailed argument in favor" of this CAP also
+earlier at each step. The "detailed argument in favor" of this CAP also
proceeded change-by-change in order from earlier to later phases of the
consensus protocol.
### Aside: Changing the combining of `upgrades` is feasible, but unnecessary
Considering the `closeTime`-related changes in this CAP also raised a further
-discussion: because the `SIGNED` `StellarValue`, besides the transaction set and
-close time, also contains a set of ledger `upgrades`, we could choose
+discussion: because the `SIGNED` `StellarValue`, besides the transaction set
+and close time, also contains a set of ledger `upgrades`, we could choose
additionally to change `upgrades` to come from the single selected
`StellarValue` rather than being combined in a way similar to the `closeTime`s,
as they are in the current protocol.
@@ -503,14 +509,14 @@ combinations of parameter changes would be insensible, then it might no longer
make sense just to combine each parameter individually -- the simple union of
individual changes could produce a result which was inconsistent. In that case,
the right way to combine them consistently would become ambiguous; there could
-be multiple options of which none would include all candidate parameter changes.
-Choosing the best single nominated set of upgrades might be the simplest way
-of choosing a consistent composite of all candidate upgrades.
+be multiple options of which none would include all candidate parameter
+changes. Choosing the best single nominated set of upgrades might be the
+simplest way of choosing a consistent composite of all candidate upgrades.
Overall, changing the selection of `upgrades` to match that of `closeTime`
could be said to make the resulting protocol simpler -- but it would make the
change for the CAP more complicated, and we haven't found any clear benefit to
-making that further change. (We did start with and do some testing of an
+making that further change. (We did start with and do some testing of an
implementation that also changed `upgrades` selection, but in the latest
implementation we have undone that part and are changing only `closeTime`.)
@@ -519,9 +525,9 @@ implementation we have undone that part and are changing only `closeTime`.)
Semantic changes resulting from this CAP we hope to be limited to the
correction of undesirable aspects of the current protocol, such as the
potential inconsistency of smart contract behavior described in the
-"Motivation", and not on any aspects of the protocol that well-behaved
-clients could be _relying_ on (a well-behaved client should not _rely_ on a
-potentially harmful race happening!).
+"Motivation", and not on any aspects of the protocol that well-behaved clients
+could be _relying_ on (a well-behaved client should not _rely_ on a potentially
+harmful race happening!).
## Security Concerns
@@ -531,44 +537,43 @@ then rejected, as described above in the "Design Rationale".
## Test Cases
-- A test is added to confirm that when a node selects a `StellarValue`
-to nominate, it trims from the selected transaction set precisely those
-transactions which would be invalid according to the `closeTime` in that
-`StellarValue`, and no others (in previous protocol versions, it should
-continue to trim those which are invalid according to the last ledger close
-time).
-
-- A test is added to confirm that nominated `StellarValue`s are checked
-for internal consistency between their close times and transaction sets --
-that is, that they contain no transactions which are invalid according to
-their own close times (a nominated `StellarValue` that does not meet that
-condition should be rejected by other nodes). (In old protocols, transactions
-in a nominated `StellarValue` are checked for validity against the last ledger
-close time.)
-
-- A test is added to confirm that the new code which builds upon PR 2608
-to allow a client to choose the `closeTime` to compare with a transaction's
-`minTime` correctly alters which transactions are considered `txTOO_EARLY` and
-which are not. (PR 2608 introduced the option for clients to choose a
-`closeTime` other than the last ledger close time to compare with a
-transaction's `maxTime` when determining which transactions are considered
-`txTOO_LATE`.)
+- A test is added to confirm that when a node selects a `StellarValue` to
+ nominate, it trims from the selected transaction set precisely those
+ transactions which would be invalid according to the `closeTime` in that
+ `StellarValue`, and no others (in previous protocol versions, it should
+ continue to trim those which are invalid according to the last ledger close
+ time).
+
+- A test is added to confirm that nominated `StellarValue`s are checked for
+ internal consistency between their close times and transaction sets -- that
+ is, that they contain no transactions which are invalid according to their
+ own close times (a nominated `StellarValue` that does not meet that condition
+ should be rejected by other nodes). (In old protocols, transactions in a
+ nominated `StellarValue` are checked for validity against the last ledger
+ close time.)
+
+- A test is added to confirm that the new code which builds upon PR 2608 to
+ allow a client to choose the `closeTime` to compare with a transaction's
+ `minTime` correctly alters which transactions are considered `txTOO_EARLY`
+ and which are not. (PR 2608 introduced the option for clients to choose a
+ `closeTime` other than the last ledger close time to compare with a
+ transaction's `maxTime` when determining which transactions are considered
+ `txTOO_LATE`.)
- The existing test for the compositing/combining function, which confirms that
-a set of candidate `StellarValue`s produces the expected composited `closeTime`,
-changes in three ways:
-
- - It tests that old protocols continue to behave the same way, but that
- in new protocol versions, the composited `closeTime` is simply that of the
- `StellarValue` containing the best transaction set, not the maximum
- `closeTime` of all candidates.
+ a set of candidate `StellarValue`s produces the expected composited
+ `closeTime`, changes in three ways:
+ - It tests that old protocols continue to behave the same way, but that in
+ new protocol versions, the composited `closeTime` is simply that of the
+ `StellarValue` containing the best transaction set, not the maximum
+ `closeTime` of all candidates.
- It is enhanced to test for expected `upgrades` as well (but these should
- behave the same between old and new protocols; it just checks that the
- new protocol doesn't accidentally change this).
+ behave the same between old and new protocols; it just checks that the new
+ protocol doesn't accidentally change this).
- It tests that old nomination protocols produce `BASIC` `StellarValue`s, but
- new nomination protocols produce `SIGNED` ones.
+ new nomination protocols produce `SIGNED` ones.
- The existing tests which check that the `StellarValue` produced by the
nomination protocol has the correct type changes to expect `BASIC`
@@ -577,11 +582,11 @@ changes in three ways:
## Implementation
-An implementation of this protocol has been written, which as far as the
-author knows is complete and correct in its semantics; it will certainly need
+An implementation of this protocol has been written, which as far as the author
+knows is complete and correct in its semantics; it will certainly need
refactoring, but it might constitute a demonstration of the practicality of
-implementing this proposal, and of a bound on the scope of the changes required,
-as well as a way of experimenting with its consequences:
+implementing this proposal, and of a bound on the scope of the changes
+required, as well as a way of experimenting with its consequences:
[Closetime change branch](https://github.com/rokopt/stellar-core/tree/proto-622-closetime)
@@ -590,28 +595,28 @@ The changes therein are limited to stellar-core, and comprise the following:
- The protocol version is bumped (currently anticipated to be to 14).
- The herder's choice of which transactions to include in the transaction set
-that it is going to nominate to depend, in new protocols only, on the close
-time that it is going to nominate, rather than (as in the current unreleased
-code) an estimate of when the next ledger is likely to close or (as in the
-released code) the last ledger close time.
+ that it is going to nominate to depend, in new protocols only, on the close
+ time that it is going to nominate, rather than (as in the current unreleased
+ code) an estimate of when the next ledger is likely to close or (as in the
+ released code) the last ledger close time.
- The herder's validation of `StellarValue`s nominated by other nodes changes
-to expect, in new protocols only, that values used by the ballot protocol
-are `SIGNED`.
+ to expect, in new protocols only, that values used by the ballot protocol are
+ `SIGNED`.
- The herder's validation of `StellarValue`s nominated by other nodes changes
-which `closeTime` to validate the transactions in the `StellarValue` against
-from the last ledger close time to the `closeTime` in the `StellarValue`.
+ which `closeTime` to validate the transactions in the `StellarValue` against
+ from the last ledger close time to the `closeTime` in the `StellarValue`.
- The herder's compositing of candidate `StellarValue`s into a single value to
-nominate, in new protocols only (it preserves its old behavior in old
-protocols), changes in the following ways:
-
+ nominate, in new protocols only (it preserves its old behavior in old
+ protocols), changes in the following ways:
- It chooses a composited `closeTime` directly from the `StellarValue`
- containing the selected transaction set. In particular, it does not need
- any trimming of the selected transaction set. (That set should already be
- consistent with `closeTime`, since it came from one nominated `StellarValue`,
- which the herder has already validated by the time it composites them.)
+ containing the selected transaction set. In particular, it does not need
+ any trimming of the selected transaction set. (That set should already be
+ consistent with `closeTime`, since it came from one nominated
+ `StellarValue`, which the herder has already validated by the time it
+ composites them.)
- It returns `SIGNED` `StellarValue`s.
diff --git a/core/cap-0035.md b/core/cap-0035.md
index 9654ea599..8e13afca4 100644
--- a/core/cap-0035.md
+++ b/core/cap-0035.md
@@ -17,54 +17,61 @@ Protocol version: 17
## Simple Summary
-This proposal provides the Issuer with a means to clawback (and reissue if desired)
-assets in order to support regulatory requirements. This function can be used to:
+This proposal provides the Issuer with a means to clawback (and reissue if
+desired) assets in order to support regulatory requirements. This function can
+be used to:
-1) recover assets that have been fraudulently obtained
-2) respond to regulatory actions, if required
-3) enable identity proofed persons to recover an enabled asset
-in the event of loss of key custody or theft.
+1. recover assets that have been fraudulently obtained
+2. respond to regulatory actions, if required
+3. enable identity proofed persons to recover an enabled asset in the event of
+ loss of key custody or theft.
-The proposal does not involve shared
-custody of the person’s account and does not affect custody of bearer assets in
-the persons account.
+The proposal does not involve shared custody of the person’s account and does
+not affect custody of bearer assets in the persons account.
## Working Group
-This protocol change was initially authored by Dan Doney and Tomer Weller. The working group include other authors and the consulted persons include key individuals familiar with the implementation of the core protocol and maintainers of Horizon and its SDKs.
+This protocol change was initially authored by Dan Doney and Tomer Weller. The
+working group include other authors and the consulted persons include key
+individuals familiar with the implementation of the core protocol and
+maintainers of Horizon and its SDKs.
## Motivation
-In order to meet securities regulatory requirements in many jurisdictions globally,
-the issuer (or designated transfer agent) must be able to demonstrate the ability
-to revoke assets in the event of a mistaken or fraudulent transaction or other
-regulatory action regarding a specific person or asset wide. To receive approval,
-the Issuer must demonstrate the ability to perform this action with or without the
-permission of the affected person. Unlike approaches that involve multiple
-signatures on the person’s wallet (brokerage clawback), this approach does not
-compromise the custody of other assets in the person’s wallet. Additionally, this
-approach is an improvement over an approach that invalidates the trustline. While
-trustline invalidation freezes assets, tokens (asset shares) remain in circulation
-compromising accounting models. The approach has other benefits to owners of the
-affected asset including the ability to recover assets if wallet custody is lost
-including the important business continuity mitigations in the loss of control of
-custodial or omnibus account.
-Ex: https://www.ccn.com/190m-gone-how-canada-biggest-bitcoin-exchange-lost-it/
+In order to meet securities regulatory requirements in many jurisdictions
+globally, the issuer (or designated transfer agent) must be able to demonstrate
+the ability to revoke assets in the event of a mistaken or fraudulent
+transaction or other regulatory action regarding a specific person or asset
+wide. To receive approval, the Issuer must demonstrate the ability to perform
+this action with or without the permission of the affected person. Unlike
+approaches that involve multiple signatures on the person’s wallet (brokerage
+clawback), this approach does not compromise the custody of other assets in the
+person’s wallet. Additionally, this approach is an improvement over an approach
+that invalidates the trustline. While trustline invalidation freezes assets,
+tokens (asset shares) remain in circulation compromising accounting models. The
+approach has other benefits to owners of the affected asset including the
+ability to recover assets if wallet custody is lost including the important
+business continuity mitigations in the loss of control of custodial or omnibus
+account. Ex:
+https://www.ccn.com/190m-gone-how-canada-biggest-bitcoin-exchange-lost-it/
### Goals Alignment
+
This CAP is aligned with the following Stellar Network Goals:
- The Stellar Network should be secure and reliable.
-- The Stellar Network should enable cross-border payments, i.e. payments via
-exchange of assets, throughout the globe, enabling users to make payments between
-assets in a manner that is fast, cheap, and highly usable.
+- The Stellar Network should enable cross-border payments, i.e. payments via
+ exchange of assets, throughout the globe, enabling users to make payments
+ between assets in a manner that is fast, cheap, and highly usable.
## Abstract
+
This proposal introduces new operations `ClawbackOp`,
`ClawbackClaimableBalanceOp`, and `SetTrustLineFlagsOp`, a new account flag
-`AUTH_CLAWBACK_ENABLED_FLAG`, a new trustline flag `TRUSTLINE_CLAWBACK_ENABLED_FLAG`,
-and a new claimable balance flag `CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG`.
+`AUTH_CLAWBACK_ENABLED_FLAG`, a new trustline flag
+`TRUSTLINE_CLAWBACK_ENABLED_FLAG`, and a new claimable balance flag
+`CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG`.
The `AUTH_CLAWBACK_ENABLED_FLAG` flag on the issuing account must be set when a
trustline is created to authorize a `ClawbackOp` operation submitted by the
@@ -72,9 +79,9 @@ issuer account.
A claimable balance inherits its clawback enabled status from the account
creating the claimable balance. A `ClawbackClaimableBalanceOp` operation is
-valid for claimable balances created by accounts whose trustline for the
-asset has clawback enabled, and for claimable balances created by an issuer
-account when the issuer account has clawback enabled.
+valid for claimable balances created by accounts whose trustline for the asset
+has clawback enabled, and for claimable balances created by an issuer account
+when the issuer account has clawback enabled.
The `ClawbackOp` and `ClawbackClaimableBalanceOp` operations result in the
removal of the specified assets issued from the network.
@@ -88,14 +95,15 @@ The `ClawbackOp` may result in revocation of some or all of the specified
assets from the designated account based on the amount provided in the
operation.
-The `SetTrustLineFlagsOp` allows the user to set and clear specific trustline
+The `SetTrustLineFlagsOp` allows the user to set and clear specific trustline
flags. This operation can be used to unset `TRUSTLINE_CLAWBACK_ENABLED_FLAG`.
## Specification
### XDR changes
-This patch of XDR changes is based on the XDR files in tag/commit `v15.1.0` (`90b2780584c6390207bf09291212d606896ce9f8`) of [stellar-core].
+This patch of XDR changes is based on the XDR files in tag/commit `v15.1.0`
+(`90b2780584c6390207bf09291212d606896ce9f8`) of [stellar-core].
```diff mddiffcheck.base=v15.1.0
diff --git a/src/xdr/Stellar-ledger-entries.x b/src/xdr/Stellar-ledger-entries.x
@@ -105,7 +113,7 @@ index 8d7463915..26ff33d43 100644
@@ -28,6 +28,17 @@ enum AssetType
ASSET_TYPE_CREDIT_ALPHANUM12 = 2
};
-
+
+union AssetCode switch (AssetType type)
+{
+case ASSET_TYPE_CREDIT_ALPHANUM4:
@@ -131,11 +139,11 @@ index 8d7463915..26ff33d43 100644
+ // with clawback enabled set to "true"
+ AUTH_CLAWBACK_ENABLED_FLAG = 0x8
};
-
+
// mask for all valid flags
const MASK_ACCOUNT_FLAGS = 0x7;
+const MASK_ACCOUNT_FLAGS_V16 = 0xF;
-
+
// maximum number of signers
const MAX_SIGNERS = 20;
@@ -187,12 +203,16 @@ enum TrustLineFlags
@@ -148,18 +156,18 @@ index 8d7463915..26ff33d43 100644
+ // balances created with its credit may also be clawed back
+ TRUSTLINE_CLAWBACK_ENABLED_FLAG = 4
};
-
+
// mask for all trustline flags
const MASK_TRUSTLINE_FLAGS = 1;
const MASK_TRUSTLINE_FLAGS_V13 = 3;
+const MASK_TRUSTLINE_FLAGS_V16 = 7;
-
+
struct TrustLineEntry
{
@@ -337,6 +357,27 @@ case CLAIMABLE_BALANCE_ID_TYPE_V0:
Hash v0;
};
-
+
+enum ClaimableBalanceFlags
+{
+ // If set, the issuer account of the asset held by the claimable balance may
@@ -207,7 +215,7 @@ index 7f08d7579..3c2b1f0be 100644
+ CLAWBACK_CLAIMABLE_BALANCE = 20,
+ SET_TRUST_LINE_FLAGS = 21
};
-
+
/* CreateAccount
@@ -236,20 +239,9 @@ struct ChangeTrustOp
struct AllowTrustOp
@@ -222,7 +230,7 @@ index 7f08d7579..3c2b1f0be 100644
- case ASSET_TYPE_CREDIT_ALPHANUM12:
- AssetCode12 assetCode12;
+ AssetCode asset;
-
+
- // add other asset types here in the future
- }
- asset;
@@ -231,11 +239,11 @@ index 7f08d7579..3c2b1f0be 100644
+ // 0, or any bitwise combination of the AUTHORIZED_* flags of TrustLineFlags
uint32 authorize;
};
-
+
@@ -376,6 +368,48 @@ case REVOKE_SPONSORSHIP_SIGNER:
signer;
};
-
+
+/* Claws back an amount of an asset from an account
+
+ Threshold: med
@@ -302,12 +310,12 @@ index 7f08d7579..3c2b1f0be 100644
+ SET_OPTIONS_INVALID_HOME_DOMAIN = -9, // malformed home domain
+ SET_OPTIONS_AUTH_REVOCABLE_REQUIRED = -10 // auth revocable is required for clawback
};
-
+
union SetOptionsResult switch (SetOptionsResultCode code)
@@ -1120,6 +1161,71 @@ default:
void;
};
-
+
+/******* Clawback Result ********/
+
+enum ClawbackResultCode
@@ -395,9 +403,9 @@ index 7f08d7579..3c2b1f0be 100644
Clawback operates similar to auth revocation in that it takes an amount of the
asset out of active circulation by preventing the account from using it on the
-network. Auth revocation freezes the full balance of an asset in an account, but
-clawback provides fine grain control and allows the issuer to take out of the
-account and destroy a specific amount of the asset.
+network. Auth revocation freezes the full balance of an asset in an account,
+but clawback provides fine grain control and allows the issuer to take out of
+the account and destroy a specific amount of the asset.
In order to execute a clawback of an amount in an account, an issuer account
must have its `AUTH_CLAWBACK_ENABLED_FLAG` flag set when the account holding
@@ -417,15 +425,14 @@ must have been created by an account that has clawback enabled on its
trustline.
In order to execute a clawback of a claimable balance created by an issuer
-account, the claimable balance must have been created when clawback was
-enabled on the issuer account.
+account, the claimable balance must have been created when clawback was enabled
+on the issuer account.
#### Account
-This proposal introduces a new flag to accounts,
-`AUTH_CLAWBACK_ENABLED_FLAG`. When the flag is set, trustlines created to the
-account inherit the flag and the balances within those trustlines may be
-clawed back by the issuer.
+This proposal introduces a new flag to accounts, `AUTH_CLAWBACK_ENABLED_FLAG`.
+When the flag is set, trustlines created to the account inherit the flag and
+the balances within those trustlines may be clawed back by the issuer.
An account may set or unset the flag using the existing `SetOptionsOp`
operation, unless the `AUTH_IMMUTABLE_FLAG` flag is set, in the same way that
@@ -433,13 +440,14 @@ existing `AUTH_*` flags may be set or unset unless the immutable flag is set.
#### Trustline
-This proposal introduces a new flag to trustlines, `TRUSTLINE_CLAWBACK_ENABLED_FLAG`,
-that is set at the time the trustline is created if the issuer account of the
-asset of the trustline has its `AUTH_CLAWBACK_ENABLED_FLAG` flag set.
+This proposal introduces a new flag to trustlines,
+`TRUSTLINE_CLAWBACK_ENABLED_FLAG`, that is set at the time the trustline is
+created if the issuer account of the asset of the trustline has its
+`AUTH_CLAWBACK_ENABLED_FLAG` flag set.
-If the new flag is set it indicates that the balance held by the trustline
-can be clawed back by the issuer using the `ClawbackOp`, and that any
-claimable balance created by the account will also be clawback enabled.
+If the new flag is set it indicates that the balance held by the trustline can
+be clawed back by the issuer using the `ClawbackOp`, and that any claimable
+balance created by the account will also be clawback enabled.
#### Claimable Balance
@@ -447,14 +455,14 @@ This proposal introduces the first flag to claimable balances,
`CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG`.
The `CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG` flag is set at the time the
-claimable balance is created if the account creating the claimable balance
-has `TRUSTLINE_CLAWBACK_ENABLED_FLAG` set on the trustline of the asset of
-the claimable balance.
+claimable balance is created if the account creating the claimable balance has
+`TRUSTLINE_CLAWBACK_ENABLED_FLAG` set on the trustline of the asset of the
+claimable balance.
If an issuer account creates a claimable balance for an asset it issues, the
-`CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG` flag is set at the time the
-claimable balance is created if the issuer account creating the claimable
-balance has `AUTH_CLAWBACK_ENABLED` set.
+`CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG` flag is set at the time the claimable
+balance is created if the issuer account creating the claimable balance has
+`AUTH_CLAWBACK_ENABLED` set.
If the new flag is set it indicates that the balance held by the claimable
balance can be clawed back by the issuer using the
@@ -463,49 +471,51 @@ balance can be clawed back by the issuer using the
#### Allow Trust Operation
This proposal introduces no changes to the `AllowTrustOp` operation XDR, but
-the operation will not accept the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` as a valid flag
-that it will operate. The definition of what flags the operation supports
+the operation will not accept the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` as a valid
+flag that it will operate. The definition of what flags the operation supports
will be limited to `AUTHORIZED_*` flags. When applying new flags to accounts
the operation will not change the `TRUSTLINE_CLAWBACK_ENABLED_FLAG`.
-If an `AllowTrustOp` operation is submitted with the `TRUSTLINE_CLAWBACK_ENABLED_FLAG`
-set, the operation will fail with the existing result code
-`ALLOW_TRUST_MALFORMED`.
+If an `AllowTrustOp` operation is submitted with the
+`TRUSTLINE_CLAWBACK_ENABLED_FLAG` set, the operation will fail with the
+existing result code `ALLOW_TRUST_MALFORMED`.
#### Set Options Operation
This proposal introduces a new result code to the `SetOptionsOp` operation XDR,
-which will be returned if `AUTH_REVOCABLE_FLAG` flag is not set whenever the
+which will be returned if `AUTH_REVOCABLE_FLAG` flag is not set whenever the
`AUTH_CLAWBACK_ENABLED_FLAG` flag is set.
This introduces a failing result in these cases:
1. Setting only `AUTH_CLAWBACK_ENABLED_FLAG` without `AUTH_REVOCABLE_FLAG`
-already set will result in `SET_OPTIONS_AUTH_REVOCABLE_REQUIRED`.
+ already set will result in `SET_OPTIONS_AUTH_REVOCABLE_REQUIRED`.
2. Clearing `AUTH_REVOCABLE_FLAG` while `AUTH_CLAWBACK_ENABLED_FLAG` is set
-will result in `SET_OPTIONS_AUTH_REVOCABLE_REQUIRED`.
+ will result in `SET_OPTIONS_AUTH_REVOCABLE_REQUIRED`.
#### Clawback Operation
-This proposal introduces the `ClawbackOp` operation. The `ClawbackOp`
-operation reduces the balance of the asset in the account by the specified
-amount of the specific `asset` from the `from` account, returning it to the
-issuer account, burning it.
+This proposal introduces the `ClawbackOp` operation. The `ClawbackOp` operation
+reduces the balance of the asset in the account by the specified amount of the
+specific `asset` from the `from` account, returning it to the issuer account,
+burning it.
Similar to other operations the clawback operation will fail if the account
balance is less than the amount specified when accounting for selling
liabilities. If clawback is required of asset amounts locked up with selling
liabilities then the issuer may use the `AllowTrustOp` operation to revoke
authorization of the trustline, which will cancel any existing ledger entries
-creating selling liabilities, such as offers, and issue the `ClawbackOp` in
-the same transaction. If the issuer wishes to allow the `from` account to
-continue utilizing the asset it can include another `AllowTrustOp` after the
+creating selling liabilities, such as offers, and issue the `ClawbackOp` in the
+same transaction. If the issuer wishes to allow the `from` account to continue
+utilizing the asset it can include another `AllowTrustOp` after the
`ClawbackOp` to authorize the account once again.
The clawback operation requires a medium threshold signature to authorize the
operation.
-`CLAWBACK_MALFORMED` will be returned for `ClawbackOp` during validation under the following conditions:
+`CLAWBACK_MALFORMED` will be returned for `ClawbackOp` during validation under
+the following conditions:
+
- `asset` is native.
- `asset` value is invalid.
- `asset.issuer` != source account
@@ -513,18 +523,20 @@ operation.
- `from` == source account
Possible return values for the `ClawbackOp` during application are:
+
- `CLAWBACK_SUCCESS` if the clawback is successful.
-- `CLAWBACK_NOT_CLAWBACK_ENABLED` if the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` is not set.
+- `CLAWBACK_NOT_CLAWBACK_ENABLED` if the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` is
+ not set.
- `CLAWBACK_NO_TRUST` if the `from` account does not have a trustline for
-`asset`.
+ `asset`.
- `CLAWBACK_UNDERFUNDED` if the `from` account does not have sufficient
-available balance of `asset` after accounting for selling liabilities.
+ available balance of `asset` after accounting for selling liabilities.
#### Clawback Claimable Balance Operation
This proposal introduces the `ClawbackClaimableBalanceOp` operation. The
-`ClawbackClaimableBalanceOp` operation destroys a claimable balance,
-returning the asset to the issuer account, burning it.
+`ClawbackClaimableBalanceOp` operation destroys a claimable balance, returning
+the asset to the issuer account, burning it.
The operation will only succeed if the claimable balance has its
`CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG` set.
@@ -532,8 +544,7 @@ The operation will only succeed if the claimable balance has its
The operation source account must be the issuer account of the asset held in
the claimable balance.
-The operation requires a medium threshold signature to authorize the
-operation.
+The operation requires a medium threshold signature to authorize the operation.
The operation introduces no new or unique behavior to how sponsored reserves
function. When the operation applies, removing the claimable balance ledger
@@ -542,28 +553,33 @@ same way it is freed when any sponsored ledger entry is removed.
`ClawbackClaimableBalanceOp` is always valid.
-Possible return values for the `ClawbackClaimableBalanceOp` during application are:
+Possible return values for the `ClawbackClaimableBalanceOp` during application
+are:
+
- `CLAWBACK_CLAIMABLE_BALANCE_SUCCESS` if the clawback is successful.
-- `CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST` if the claimable balance does
-not exist.
-- `CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER` if the source account is the not
-the issuer of the `asset`.
+- `CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST` if the claimable balance does not
+ exist.
+- `CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER` if the source account is the not the
+ issuer of the `asset`.
- `CLAWBACK_CLAIMABLE_BALANCE_NOT_CLAWBACK_ENABLED` if the
-`CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG` is not set.
+ `CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG` is not set.
#### Set TrustLine Flags Operation
-This proposal introduces the `SetTrustLineFlagsOp` operation. The operation works
-like the `AllowTrustOp`, except it uses set and clear parameters to set/clear specific
-trustline flags. This will allow an issuer to clear the `TRUSTLINE_CLAWBACK_ENABLED_FLAG`.
-An important detail to note here is that `SetTrustLineFlagsOp` will mimic the changes to `AllowTrustOp`
-specified in [CAP-0029](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md).
+This proposal introduces the `SetTrustLineFlagsOp` operation. The operation
+works like the `AllowTrustOp`, except it uses set and clear parameters to
+set/clear specific trustline flags. This will allow an issuer to clear the
+`TRUSTLINE_CLAWBACK_ENABLED_FLAG`. An important detail to note here is that
+`SetTrustLineFlagsOp` will mimic the changes to `AllowTrustOp` specified in
+[CAP-0029](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md).
-The operation requires a low threshold signature to authorize the
-operation.
+The operation requires a low threshold signature to authorize the operation.
+
+`SET_TRUST_LINE_FLAGS_MALFORMED` will be returned for `SetTrustLineFlagsOp`
+during validation under the following conditions:
-`SET_TRUST_LINE_FLAGS_MALFORMED` will be returned for `SetTrustLineFlagsOp` during validation under the following conditions:
-- Both `AUTHORIZED_FLAG` and `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` are set on `setFlags`.
+- Both `AUTHORIZED_FLAG` and `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` are set
+ on `setFlags`.
- `asset` is invalid.
- `asset` is `ASSET_TYPE_NATIVE`.
- `setFlags` and `clearFlags` are modifying the same flags.
@@ -573,76 +589,78 @@ operation.
- `asset.issuer` != source account
Possible return values for the `SetTrustLineFlagsOp` during application are:
+
- `SET_TRUST_LINE_FLAGS_SUCCESS` if the operation is successful.
- `SET_TRUST_LINE_FLAGS_NO_TRUST_LINE` if the trustline doesn't exist.
-- `SET_TRUST_LINE_FLAGS_CANT_REVOKE` if `AUTH_REVOCABLE_FLAG` is not set on the issuer,
- but the authorization is downgraded (`AUTHORIZED_FLAG` -> `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG`, `AUTHORIZED_FLAG` -> 0,
- or `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` -> 0).
-- `SET_TRUST_LINE_FLAGS_INVALID_STATE` if the final state of the trustline flag has both
- `AUTHORIZED_FLAG` and `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` set.
+- `SET_TRUST_LINE_FLAGS_CANT_REVOKE` if `AUTH_REVOCABLE_FLAG` is not set on the
+ issuer, but the authorization is downgraded (`AUTHORIZED_FLAG` ->
+ `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG`, `AUTHORIZED_FLAG` -> 0, or
+ `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` -> 0).
+- `SET_TRUST_LINE_FLAGS_INVALID_STATE` if the final state of the trustline flag
+ has both `AUTHORIZED_FLAG` and `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` set.
## Design Rationale
In the event of regulatory action, erroneous transaction, or loss of custody,
-the issuer may conduct a clawback transaction if the appropriate flags are
-set. In the event of loss of custody, the affected party would need to
-demonstrate they are the rightful owner of the account (usually through
-reproofing KYC credentials or otherwise authenticating). On obtaining this
-proof, the issuer could execute a clawback from the lost account followed by a
-subsequent payment to a separate account under control of the affected party.
-Needless to say, executing a reallocation is a significant responsibility and
-in many cases should be reserved for licensed entities (like a transfer agent)
-holding the issuer credentials and aware of responsibilities under the law of
-the jurisdiction of the affected party and asset.
-
+the issuer may conduct a clawback transaction if the appropriate flags are set.
+In the event of loss of custody, the affected party would need to demonstrate
+they are the rightful owner of the account (usually through reproofing KYC
+credentials or otherwise authenticating). On obtaining this proof, the issuer
+could execute a clawback from the lost account followed by a subsequent payment
+to a separate account under control of the affected party. Needless to say,
+executing a reallocation is a significant responsibility and in many cases
+should be reserved for licensed entities (like a transfer agent) holding the
+issuer credentials and aware of responsibilities under the law of the
+jurisdiction of the affected party and asset.
+
### Flags
The account `AUTH_CLAWBACK_ENABLED_FLAG` flag allows the issuer to indicate
-that it has control over the use of the asset on the network. By including
-the flag in account flags, account owners may review the revocability of an
-asset issued by the issuer and have the choice to avoid this type of asset if
-they object to the implied trust in the issuer.
-
-The account `AUTH_REVOCABLE_FLAG` flag must also be set because for clawback
-to succeed in all cases that an account holds an asset, the issuer must be
-able to revoke authorization to release any offers creating selling
-liabilities with the asset. If an issuer enables clawback but not
-`AUTH_REVOCABLE_FLAG` it will likely be oversight.
-
-By setting the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` flag on the trustline, account owners
-have confidence that the clawback feature may not be enabled if it was not
-enabled when they created their trustline.
-
-By setting the `CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG` flag on the claimable balance
-based on the state of the trustline of the account creating the claimable balance,
-account owners have confidence that the clawback feature may not be enabled
-for claimable balances they create if it was not enabled when they created
-their trustline.
+that it has control over the use of the asset on the network. By including the
+flag in account flags, account owners may review the revocability of an asset
+issued by the issuer and have the choice to avoid this type of asset if they
+object to the implied trust in the issuer.
+
+The account `AUTH_REVOCABLE_FLAG` flag must also be set because for clawback to
+succeed in all cases that an account holds an asset, the issuer must be able to
+revoke authorization to release any offers creating selling liabilities with
+the asset. If an issuer enables clawback but not `AUTH_REVOCABLE_FLAG` it will
+likely be oversight.
+
+By setting the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` flag on the trustline, account
+owners have confidence that the clawback feature may not be enabled if it was
+not enabled when they created their trustline.
+
+By setting the `CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG` flag on the claimable
+balance based on the state of the trustline of the account creating the
+claimable balance, account owners have confidence that the clawback feature may
+not be enabled for claimable balances they create if it was not enabled when
+they created their trustline.
### Threshold
The clawback operations require a medium threshold signature because they are
-changing the balance of accounts and changing the states of claimable
-balances and is more aligned with impact of a payment operation than an allow
-trust operation.
+changing the balance of accounts and changing the states of claimable balances
+and is more aligned with impact of a payment operation than an allow trust
+operation.
-`SetTrustLineFlagsOp` requires a low threshold signature because `AllowTrustOp`,
-the other operation that allows one to modify trustline flags, requires a low
-threshold as well.
+`SetTrustLineFlagsOp` requires a low threshold signature because
+`AllowTrustOp`, the other operation that allows one to modify trustline flags,
+requires a low threshold as well.
### Claimable Balances
Claimable balances are immutable and so any functionality to clawback a
-claimable balance is all or nothing. The clawback feature of claimable
-balances is proposed with a flag so that it is explicit when a claimable
-balance can be clawed back. The flag state is inherited from the trustline of
-the account creating the claimable balance to ensure that the balance
-controlled by the account holder does not change clawback enabled status when
-moving between account and claimable balances.
+claimable balance is all or nothing. The clawback feature of claimable balances
+is proposed with a flag so that it is explicit when a claimable balance can be
+clawed back. The flag state is inherited from the trustline of the account
+creating the claimable balance to ensure that the balance controlled by the
+account holder does not change clawback enabled status when moving between
+account and claimable balances.
A separate operation is specified to ensure that clawback is transparent and
-highly visible in comparison to routine claiming of a claimable balance and
-to allow issuers that use claimable balances routinely to distinguish between
+highly visible in comparison to routine claiming of a claimable balance and to
+allow issuers that use claimable balances routinely to distinguish between
claimable balances they can routinely claim and claimable balances they may
clawback.
@@ -655,76 +673,80 @@ of a trustline is not possible in this case. By setting the
on the state of the issuer account flags when the issuer account creates a
claimable balance, the issuer account retains control to specify if claimable
balances it creates are clawed back. This is intuitive, but not overly
-flexible. Issuer accounts that need to create claimable balances with
-clawback disabled, but need to have clawback enabled for new trustlines, can
-create claimable balances via a second account that it allows to have a
-trustline that is clawback disabled.
-
+flexible. Issuer accounts that need to create claimable balances with clawback
+disabled, but need to have clawback enabled for new trustlines, can create
+claimable balances via a second account that it allows to have a trustline that
+is clawback disabled.
### Allow Trust Operation
-The `AllowTrustOp` is disallowed from operating on the new `TRUSTLINE_CLAWBACK_ENABLED_FLAG`
-trustline flag because this operation doesn't have a parameter to set/clear flags.
-It requires the user to be aware of existing flags, which is an operational burden.
+The `AllowTrustOp` is disallowed from operating on the new
+`TRUSTLINE_CLAWBACK_ENABLED_FLAG` trustline flag because this operation doesn't
+have a parameter to set/clear flags. It requires the user to be aware of
+existing flags, which is an operational burden.
-A new operation, `SetTrustLineFlagsOp`, is proposed in this cap to make
-trustline flag modification simpler. Additionally the `AllowTrustOp` operation is
-semantically intended to change authorization and clawback is outside it's scope.
+A new operation, `SetTrustLineFlagsOp`, is proposed in this cap to make
+trustline flag modification simpler. Additionally the `AllowTrustOp` operation
+is semantically intended to change authorization and clawback is outside its
+scope.
The `AllowTrustOp` is deprecated for `SetTrustLineFlagsOp`.
-
### Set TrustLine Flags Operation
-The `SetTrustLineFlagsOp` will make it easier for issuers to modify trustline flags.
-
-In the context of clawback, an issuer can clear the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` from a trustline
-without knowledge of the current state of the other flags. This is not possible with
-`AllowTrustOp` because it sets the trustline flags to the value passed in by `AllowTrustOp`.
-It is also not possible to add the existing flag to the `AllowTrustOp` operation without
-breaking existing users of that operation since they will not be expecting to include the
-current state of the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` in the flags field.
-
-Clearing the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` allows the issuer to create a clawback enabled asset,
-with a subset of trustlines that are not subject to clawback. The ability to clear flag will also
-allow an issuer to disable clawback on an asset entirely by removing `AUTH_CLAWBACK_ENABLED_FLAG`
-on its own account and clearing `TRUSTLINE_CLAWBACK_ENABLED_FLAG` from all existing trustlines to
-that asset.
-
+The `SetTrustLineFlagsOp` will make it easier for issuers to modify trustline
+flags.
+
+In the context of clawback, an issuer can clear the
+`TRUSTLINE_CLAWBACK_ENABLED_FLAG` from a trustline without knowledge of the
+current state of the other flags. This is not possible with `AllowTrustOp`
+because it sets the trustline flags to the value passed in by `AllowTrustOp`.
+It is also not possible to add the existing flag to the `AllowTrustOp`
+operation without breaking existing users of that operation since they will not
+be expecting to include the current state of the
+`TRUSTLINE_CLAWBACK_ENABLED_FLAG` in the flags field.
+
+Clearing the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` allows the issuer to create a
+clawback enabled asset, with a subset of trustlines that are not subject to
+clawback. The ability to clear the flag will also allow an issuer to disable
+clawback on an asset entirely by removing `AUTH_CLAWBACK_ENABLED_FLAG` on its
+own account and clearing `TRUSTLINE_CLAWBACK_ENABLED_FLAG` from all existing
+trustlines to that asset.
### Asset instead of AssetCode
-Both `SetTrustLineFlagsOp` and `ClawbackOp` use an `Asset` as a parameter instead of an `AssetCode`
-to defer the decision of who all future authorizers of the operation can be. The `Asset` includes
-the issuer account, where-as use of `AssetCode` would assume the source account is always the issuer
-account. In this proposal, only the issuer may authorize the operation. This decision makes it possible
-in a future proposal to expand the accounts that may be allowed to issue the operation.
+Both `SetTrustLineFlagsOp` and `ClawbackOp` use an `Asset` as a parameter
+instead of an `AssetCode` to defer the decision of who all future authorizers
+of the operation can be. The `Asset` includes the issuer account, whereas use
+of `AssetCode` would assume the source account is always the issuer account. In
+this proposal, only the issuer may authorize the operation. This decision makes
+it possible in a future proposal to expand the accounts that may be allowed to
+issue the operation.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-The change does not have an affect on previous assets, accounts, or transaction
-structure. It should not cause a breaking change in existing implementations.
+The change does not have an effect on previous assets, accounts, or transaction
+structure. It should not cause a breaking change in existing implementations.
Pre-authorized, pre-signed, or pre-planned transactions that create accounts,
-and create claimable balances, using assets that become clawback enabled
-could fail if those transactions did not include the issuer account as a
-claimant.
+and create claimable balances, using assets that become clawback enabled could
+fail if those transactions did not include the issuer account as a claimant.
The `ClawbackOp` operation introduced is reversible meaning that their use if
reversed has no impact on existing pre-signed or pre-authorized transactions
involving the asset and the account that the clawback operates on.
-The `ClawbackClaimableBalanceOp` operation is not reversible meaning that
-after their use there is no way to recreate the destroyed claimable balance
-with the same identifier. Pre-signed or pre-authorized transactions that
-claim the claimable balance will have encoded its identifier. Authors of
-contracts or systems utilizing pre-signed or pre-authorized transactions can
-identify that assets may be clawed back by inspecting the flags of the
-account that will create the claimable balance and the the flags of the
-issuer to understand if the issuer could enable clawback on new accounts.
-
+The `ClawbackClaimableBalanceOp` operation is not reversible meaning that after
+their use there is no way to recreate the destroyed claimable balance with the
+same identifier. Pre-signed or pre-authorized transactions that claim the
+claimable balance will have encoded its identifier. Authors of contracts or
+systems utilizing pre-signed or pre-authorized transactions can identify that
+assets may be clawed back by inspecting the flags of the account that will
+create the claimable balance and the flags of the issuer to understand if
+the issuer could enable clawback on new accounts.
+
### Resource Utilization
No substantial changes to resource utilization.
diff --git a/core/cap-0036.md b/core/cap-0036.md
index 1d68d6e1d..db18d5c77 100644
--- a/core/cap-0036.md
+++ b/core/cap-0036.md
@@ -19,12 +19,12 @@ Protocol version: TBD
This proposal provides the Issuer with a means to claim assets stored in
claimable balances in order to support regulatory requirements. This function
-can be used to:
+can be used to:
-1) recover assets that have been fraudulently obtained
-2) respond to regulatory actions, if required
-3) enable identity proofed persons to recover an enabled asset
-in the event of loss of key custody or theft.
+1. recover assets that have been fraudulently obtained
+2. respond to regulatory actions, if required
+3. enable identity proofed persons to recover an enabled asset in the event of
+ loss of key custody or theft.
The proposal does not involve shared custody of the person’s account and does
not affect custody of bearer assets in the persons account.
@@ -32,8 +32,8 @@ not affect custody of bearer assets in the persons account.
## Working Group
This protocol change is being developed in conjunction with CAP-35 which was
-initially by Dan Doney and Tomer Weller. The working group include other authors
-and the consulted persons include key individuals familiar with the
+initially proposed by Dan Doney and Tomer Weller. The working group includes other
+authors and the consulted persons include key individuals familiar with the
implementation of the core protocol and maintainers of Horizon and its SDKs.
## Motivation
@@ -46,28 +46,29 @@ This CAP is aligned with the following Stellar Network Goals:
- The Stellar Network should be secure and reliable.
-- The Stellar Network should enable cross-border payments, i.e. payments via
-exchange of assets, throughout the globe, enabling users to make payments between
-assets in a manner that is fast, cheap, and highly usable.
+- The Stellar Network should enable cross-border payments, i.e. payments via
+ exchange of assets, throughout the globe, enabling users to make payments
+ between assets in a manner that is fast, cheap, and highly usable.
## Abstract
This proposal introduces a new `ClawbackClaimableBalanceOp` operation. The
`AUTH_RECOVERABLE` flag on the issuing account must be set to authorize a
-`ClawbackClaimableBalanceOp` operation submitted by the Issuing account. The
+`ClawbackClaimableBalanceOp` operation submitted by the Issuing account. The
`ClawbackClaimableBalanceOp` operation results in the claiming of a specific
-`ClaimableBalanceEntry`. The `ClawbackClaimableBalanceOp` operation only applies
-to assets issued by the source account. Assets that are revocable can be easily
-distinguished from traditional blockchain assets (bearer instruments) so that
-asset owners are aware of rights. The transaction results in revocation of all
-of the specified asset from the claimable balance and the claimable balance is
-destroyed as if it had been claimed by a claimaint.
+`ClaimableBalanceEntry`. The `ClawbackClaimableBalanceOp` operation only
+applies to assets issued by the source account. Assets that are revocable can
+be easily distinguished from traditional blockchain assets (bearer instruments)
+so that asset owners are aware of rights. The transaction results in revocation
+of all of the specified asset from the claimable balance and the claimable
+balance is destroyed as if it had been claimed by a claimant.
## Specification
### XDR changes
-This patch of XDR changes is based on the XDR files as they are after the XDR changes from CAP-35 are applied.
+This patch of XDR changes is based on the XDR files as they are after the XDR
+changes from CAP-35 are applied.
```diff mddiffcheck.ignore=true
--- a/src/xdr/Stellar-transaction.x
@@ -80,12 +81,12 @@ This patch of XDR changes is based on the XDR files as they are after the XDR ch
+ CLAWBACK = 19,
+ CLAWBACK_CLAIMABLE_BALANCE = 20
};
-
+
/* CreateAccount
@@ -346,6 +347,17 @@ struct ClawbackOp
int64 amount;
};
-
+
+/* Claws back a claimable balance
+
+ Threshold: med
@@ -98,7 +99,7 @@ This patch of XDR changes is based on the XDR files as they are after the XDR ch
+};
+
/* BeginSponsoringFutureReserves
-
+
Establishes the is-sponsoring-future-reserves-for relationship between
@@ -475,6 +487,8 @@ struct Operation
RevokeSponsorshipOp revokeSponsorshipOp;
@@ -112,7 +113,7 @@ This patch of XDR changes is based on the XDR files as they are after the XDR ch
@@ -1193,6 +1207,27 @@ default:
void;
};
-
+
+/******* ClawbackClaimableBalance Result ********/
+
+enum ClawbackClaimableBalanceResultCode
@@ -149,6 +150,7 @@ This patch of XDR changes is based on the XDR files as they are after the XDR ch
```
### Semantics
+
An issuer clawing back a `ClaimableBalanceEntry` operates similar to auth
revocation and the `ClawbackOp` operation introduced in CAP-35. Account auth
recovation freezes the full balance of an asset in an account, clawback from an
@@ -160,42 +162,63 @@ in a claimable balance.
In order to execute a clawback on a claimable balance, an issuer account must
have its `AUTH_REVOCABLE` flag set. Once set, the issuer submits a
`ClawbackClaimableBalanceOp` operation in the same way a claimant can submit a
-`ClaimClaimableBalanceOp`. No predicates on the `ClaimableBalanceEntry` impact the success of the clawback.
+`ClaimClaimableBalanceOp`. No predicates on the `ClaimableBalanceEntry` impact
+the success of the clawback.
This operation does not require the signature of any claimaint accounts, or the
signature of the account that created the claimable balance. The amount of the
asset clawed back is burned and is not sent to any other address since the
-return of an asset to the . The issuer may reissue the asset to the same account
-or to another account if the intent of the clawback is to move the asset to
-another account.
+return of an asset to the issuer. The issuer may reissue the asset to the same
+account or to another account if the intent of the clawback is to move the
+asset to another account.
#### Account
+
This proposal uses the existing `AUTH_REVOCABLE` flag in the issuer account
`AccountFlags`. Existing behavior and meaning of the flag is unchanged.
#### ClawbackClaimableBalance Operation
-The `ClawbackClaimableBalanceOp` operation destroys a claimable balance effectively returning the asset to the issuer, and burning the asset stored within.
+
+The `ClawbackClaimableBalanceOp` operation destroys a claimable balance
+effectively returning the asset to the issuer, and burning the asset stored
+within.
The clawback operation requires a medium threshold signature to authorize the
operation.
Possible return values for the `ClawbackClaimableBalanceOp` are:
+
- `CLAWBACK_CLAIMABLE_BALANCE_SUCCESS` if the clawback is successful.
- `CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST` if the claimable balance does not
exist.
- `CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER` if the `sourceAccount` is not the
issuer account of the asset stored in the `ClaimableBalanceEntry`.
-- `CLAWBACK_CLAIMABLE_BALANCE_NOT_REVOCABLE` if the `AUTH_REVOCABLE` flag is not set
- on the `sourceAccount`.
+- `CLAWBACK_CLAIMABLE_BALANCE_NOT_REVOCABLE` if the `AUTH_REVOCABLE` flag is
+ not set on the `sourceAccount`.
## Design Rationale
-The rationale for this proposal extends the rational as described in CAP-35. Claimable balances store value on the network and the same events of regulatory action, erroneous transaction, or loss of custody of a claimaint account, can result in an issuer needing to reissue an asset stored within a claimable balance. CAP-35 adds this capability to assets stored in accounts, and this proposal provides the same functionality to claimable balances.
-CAP-35 allows an issuer to clawback specific amounts within an account but this proposal limits clawback to the full amount within the claimable balance. Claimable balances are immutable and there would be massive downstream impacts to changing the amount within a claimable balance.
+The rationale for this proposal extends the rationale as described in CAP-35.
+Claimable balances store value on the network and the same events of regulatory
+action, erroneous transaction, or loss of custody of a claimant account, can
+result in an issuer needing to reissue an asset stored within a claimable
+balance. CAP-35 adds this capability to assets stored in accounts, and this
+proposal provides the same functionality to claimable balances.
+
+CAP-35 allows an issuer to clawback specific amounts within an account but this
+proposal limits clawback to the full amount within the claimable balance.
+Claimable balances are immutable and there would be massive downstream impacts
+to changing the amount within a claimable balance.
+
+Claimable balances cannot be recreated after they are destroyed with clawback
+because their claimable balance ID is generated from the transaction that
+creates them, and to recreate the same claimable balance would duplicate events
+in downstream systems. Issuers can reissue the asset in a new claimable balance
+configured with the same claimants and predicates to recreate the claimable
+balance, but the claimable balance ID will not be identical.
-Claimable balances cannot be recreated after they are destroyed with clawback because their claimable balance ID is generated from the transaction that creates them, and to recreate the same claimable balance would duplicate events in downstream systems. Issuers can reissue the asset in a new claimable balance configured with the same claimaints and predicates to recreate the claimable balance, but the claimable balance ID will not be identical.
-
### Reusing the AUTH_REVOCABLE flag
+
The account `AUTH_REVOCABLE` flag allows the issuer to indicate that it has
control over the use of the asset on the network. By including the
`AUTH_REVOCABLE` flag in account flags, account owners may review the
@@ -205,6 +228,7 @@ another form of an issuer revoking use of an asset with fine control over the
exact amount that the issuer is taking out of active circulation.
### Threshold
+
The clawback operation requires a medium threshold signature because it is
changing the balance of an account and is more aligned with impact of a payment
operation than an allow trust operation.
@@ -213,8 +237,8 @@ operation than an allow trust operation.
### Backwards Incompatibilities
-The change does not have an affect on previous assets, accounts, or transaction
-structure. It should not cause a breaking change in existing implementations.
+The change does not have an effect on previous assets, accounts, or transaction
+structure. It should not cause a breaking change in existing implementations.
The change does make it possible for claimable balances to be destroyed, and
claimable balances could form an important part of series of transactions that
@@ -234,8 +258,9 @@ of an accidental or temporary clawback would be identical to that of a clawback
of an asset from an account, or temporary auth revocation. Mutability of
claimable balances has not been proposed because when they were introduced they
were intended to be immutable.
-
+
### Resource Utilization
+
No substantial changes to resource utilization.
## Test Cases
diff --git a/core/cap-0037.md b/core/cap-0037.md
index a60a2467e..e909441f6 100644
--- a/core/cap-0037.md
+++ b/core/cap-0037.md
@@ -6,7 +6,7 @@ Title: Automated Market Makers
Working Group:
Owner: OrbitLens
Authors: OrbitLens
- Consulted: Jon Jove , Nicolas Barry , Nikhil Saraf, Phil Meng , Leigh McCulloch , Tomer Weller
+ Consulted: Jon Jove , Nicolas Barry , Nikhil Saraf, Phil Meng , Leigh McCulloch , Tomer Weller
Status: Draft
Created: 2021-03-03
Discussion: https://groups.google.com/g/stellar-dev/c/Ofb2KXwzva0/m/LLcUKWFmBwAJ
@@ -19,8 +19,8 @@ This proposal introduces liquidity pools and automated market makers on the
protocol level. AMMs rely on a mathematical formula to quote asset prices. A
liquidity pool is a ledger entry that contains funds deposited by users
(liquidity providers). In return for providing liquidity to the protocol, users
-earn fees from trades. The described approach of the interleaved order execution
-combines the liquidity of existing orderbooks with liquidity pools.
+earn fees from trades. The described approach of the interleaved order
+execution combines the liquidity of existing orderbooks with liquidity pools.
## Motivation
@@ -36,14 +36,14 @@ increase, as well as the number of operations required to maintain positions on
all orderbooks.
On the other hand, automated market makers provide natural incentives for
-liquidity crowdsourcing, making it much easier for ordinary users to participate
-in the process while gaining interest on their long-term holdings.
+liquidity crowdsourcing, making it much easier for ordinary users to
+participate in the process while gaining interest on their long-term holdings.
Asset issuers don't need to wait until the token attracts a critical mass of
users. They can start making several trading pairs with a newly issued asset by
merely depositing tokens to the pool or engaging community users to provision
-liquidity. This will certainly simplify the process of starting a new project on
-Stellar, as well as provide a powerful marketing flywheel for early-stage
+liquidity. This will certainly simplify the process of starting a new project
+on Stellar, as well as provide a powerful marketing flywheel for early-stage
tokens.
The AMM concept implies that no third-party company holds user funds at any
@@ -58,22 +58,23 @@ scalability compared to the existing DEX.
Proposed interleaved order execution on both the orderbook and liquidity pool
provides a familiar exchange experience in combination with the ability to have
on-chain limit orders. On the other hand, it fully incorporates all benefits of
-shared liquidity pools, at the same time hiding the underlying technical details
-from end-users. Users always get the best possible exchange price based on the
-combined liquidity.
+shared liquidity pools, at the same time hiding the underlying technical
+details from end-users. Users always get the best possible exchange price based
+on the combined liquidity.
## Abstract
-This proposal brings the concept of shared liquidity pools with automated market
-making to the protocol. Users deposit funds to a pool providing liquidity to the
-automated market maker execution engine which can quote asset prices based on an
-algorithm that derives the price directly from the amounts of tokens deposited
-to the pool.
+This proposal brings the concept of shared liquidity pools with automated
+market making to the protocol. Users deposit funds to a pool providing
+liquidity to the automated market maker execution engine which can quote asset
+prices based on an algorithm that derives the price directly from the amounts
+of tokens deposited to the pool.
Pool fees charged on every executed trade are accumulated in the pool,
increasing its liquidity. A user can withdraw the pool stake plus proportional
-accrued interest from the pool. Collected interest incentivizes users to deposit
-their funds to the pool, participating in the collective liquidity allocation.
+accrued interest from the pool. Collected interest incentivizes users to
+deposit their funds to the pool, participating in the collective liquidity
+allocation.
## Specification
@@ -91,7 +92,7 @@ their funds to the pool, participating in the collective liquidity allocation.
@@ -403,6 +403,43 @@ struct ClaimableBalanceEntry
ext;
};
-
+
+/* Contains information about current balances of the liquidity pool*/
+struct LiquidityPoolEntry
+{
@@ -142,7 +143,7 @@ their funds to the pool, participating in the collective liquidity allocation.
+ LiquidityStakeEntry LiquidityStake;
}
data;
-
+
@@ -479,6 +520,21 @@ case CLAIMABLE_BALANCE:
{
ClaimableBalanceID balanceID;
@@ -177,12 +178,12 @@ their funds to the pool, participating in the collective liquidity allocation.
+ DEPOSIT_POOL_LIQUIDITY = 21,
+ WITHDRAW_POOL_LIQUIDITY = 22
};
-
+
/* CreateAccount
@@ -390,6 +392,37 @@ struct ClawbackClaimableBalanceOp
ClaimableBalanceID balanceID;
};
-
+
+/* Deposits funds to the liquidity pool
+
+ Threshold: med
@@ -220,7 +221,7 @@ their funds to the pool, participating in the collective liquidity allocation.
@@ -1186,6 +1219,67 @@ default:
void;
};
-
+
+/******* DepositPoolLiquidity Result ********/
+
+enum DepositPoolLiquidityResultCode
@@ -286,15 +287,16 @@ their funds to the pool, participating in the collective liquidity allocation.
## Semantics
-Modified semantics of trading-related operations presented in this CAP allows to
-drastically reduce the number of new interaction flows. Liquidity from the pools
-will be immediately available for existing Stellar applications through the
-convenient offers and path payment interface operations.
+Modified semantics of trading-related operations presented in this CAP allows
+one to drastically reduce the number of new interaction flows. Liquidity from the
+pools will be immediately available for existing Stellar applications through
+the convenient offers and path payment interface operations.
In this proposal, a constant product invariant is used for all calculations

-Other invariants can be implemented as separate pools with different price
-quotation formulas and execution conditions.
+Other
+invariants can be implemented as separate pools with different price quotation
+formulas and execution conditions.
#### DepositPoolLiquidityOp
@@ -303,13 +305,13 @@ liquidity pool defined as `LiquidityPoolEntry`.
- Before processing a deposit, basic validation is required to ensure that a
given combination of assets is allowed. For example, the situation when
- `assetA`=`assetB` should result in `DEPOSIT_POOL_LIQUIDITY_NOT_ALLOWED` error.
- This version of the proposal doesn't imply any other restrictions, but this
- may change in the future.
-- The node performs a lookup of a `LiquidityStakeEntry` by hash derived from the
- operation source account address, `poolType`, `assetA`, and `assetB`.
-- If the proposed pool fee (`fee` parameter) is less than 0.0001 or greater than
- 0.01, `DEPOSIT_POOL_LIQUIDITY_NOT_ALLOWED` error returned.
+ `assetA`=`assetB` should result in `DEPOSIT_POOL_LIQUIDITY_NOT_ALLOWED`
+ error. This version of the proposal doesn't imply any other restrictions, but
+ this may change in the future.
+- The node performs a lookup of a `LiquidityStakeEntry` by hash derived from
+ the operation source account address, `poolType`, `assetA`, and `assetB`.
+- If the proposed pool fee (`fee` parameter) is less than 0.0001 or greater
+ than 0.01, `DEPOSIT_POOL_LIQUIDITY_NOT_ALLOWED` error returned.
- The node loads source account balances for `assetA`, `assetB`. If any of the
balances do not exist, `DEPOSIT_POOL_LIQUIDITY_INSUFFICIENT_AMOUNT` error
returned.
@@ -318,60 +320,59 @@ liquidity pool defined as `LiquidityPoolEntry`.
Current pool price:

where:
- - ***A*** and ***B*** - amounts of asset A and asset B currently in the pool
- - ***a*** and ***b*** - `maxAmountA` and `maxAmountB` defined in the
- operation
-- Maximum allowed price deviation is controlled by the `priceAccuracy` (***d***)
- parameter.
+ - **_A_** and **_B_** - amounts of asset A and asset B currently in the pool
+ - **_a_** and **_b_** - `maxAmountA` and `maxAmountB` defined in the
+ operation
+- Maximum allowed price deviation is controlled by the `priceAccuracy`
+ (**_d_**) parameter.

In case of a significant deviation, `DEPOSIT_POOL_LIQUIDITY_PRICE_MISMATCH`
- error returned. This check is ignored if the pool doesn't exist
- or `priceAccuracy` equals to zero.
+ error returned. This check is ignored if the pool doesn't exist or
+ `priceAccuracy` equals to zero.
- Effective token amounts that can be deposited are adjusted to follow the
current price.
- If
- 
+ If 
, token A deposit amount is adjusted as
- )
+ ![`a=ceil(bd*P)`]()
otherwise
- )
+ ![`b=ceil(ad/P)`]()
where:
- - )
- - )
- - ***a*** and ***b*** – maximum effective amounts of tokens A and B that can
- be deposited to the pool. If the actual deposited amount of any token
- equals zero, `DEPOSIT_POOL_LIQUIDITY_INSUFFICIENT_AMOUNT` error returned.
+ - ![`ad=min(maxAmountA,accountBalanceA)`]()
+ - ![`bd=min(maxAmountB,accountBalanceB)`]()
+ - **_a_** and **_b_** – maximum effective amounts of tokens A and B that can
+ be deposited to the pool. If the actual deposited amount of any token
+ equals zero, `DEPOSIT_POOL_LIQUIDITY_INSUFFICIENT_AMOUNT` error returned.
- Stake weight calculated as
- )
+ ![`s=floor(S*√((a*b)/(A*B)))`]()
where
- - ***s*** - account stake (share of the pool obtained after the deposit)
- - ***a*** and ***b*** - effective amount of tokens to deposit
- - ***S*** - total stakes currently in the pool (from `LiquidityPoolEntry`)
- - ***A*** and ***B*** - correspondingly amount of token A and B currently
- deposited to the pool
+ - **_s_** - account stake (share of the pool obtained after the deposit)
+ - **_a_** and **_b_** - effective amount of tokens to deposit
+ - **_S_** - total stakes currently in the pool (from `LiquidityPoolEntry`)
+ - **_A_** and **_B_** - correspondingly amount of token A and B currently
+ deposited to the pool
- If `LiquidityPoolEntry` does not exist on-chain (this is the first deposit),
it is created automatically. The stake weight for the deposit, in this case,
the stake computed as
- )
-- If ***s***=0 (this can be the case with a very small stake or as a result of
- rounding approximation), the node
- returns `DEPOSIT_POOL_LIQUIDITY_INSUFFICIENT_AMOUNT` error.
-- If one of the assets is XLM and its balance does not satisfy the basic reserve
- requirement, `DEPOSIT_POOL_LIQUIDITY_LOW_RESERVE` error returned.
+ ![`s=floor(√(a*b))`]()
+- If **_s_**=0 (this can be the case with a very small stake or as a result of
+ rounding approximation), the node returns
+ `DEPOSIT_POOL_LIQUIDITY_INSUFFICIENT_AMOUNT` error.
+- If one of the assets is XLM and its balance does not satisfy the basic
+ reserve requirement, `DEPOSIT_POOL_LIQUIDITY_LOW_RESERVE` error returned.
- If the account `LiquidityStakeEntry` exists, the stake is increased
- `stake`+=***s*** and the proposed pool fee updated with the value from the
+ `stake`+=**_s_** and the proposed pool fee updated with the value from the
deposit operation. Otherwise, new `LiquidityStakeEntry` created,
`numSubEntries` for the source account incremented, and base reserve locked.
- Effective pool fee recalculated using a weighted average formula:

where
- - ***Fₙ*** - new pool fee
- - ***S*** - total stakes currently in the pool (from `LiquidityPoolEntry`)
- - ***F*** - current pool fee
- - ***s*** - account stake
- - ***f*** - pool fee proposed by the account
-- The node modifies `LiquidityPoolEntry` setting `amountA`+=***a***,
- `amountB`+=***b***, `stakes`+=***s***, fee=***Fₙ***.
+ - **_Fₙ_** - new pool fee
+ - **_S_** - total stakes currently in the pool (from `LiquidityPoolEntry`)
+ - **_F_** - current pool fee
+ - **_s_** - account stake
+ - **_f_** - pool fee proposed by the account
+- The node modifies `LiquidityPoolEntry` setting `amountA`+=**_a_**,
+ `amountB`+=**_b_**, `stakes`+=**_s_**, fee=**_Fₙ_**.
- Deposited funds transferred into the pool.
- `DEPOSIT_POOL_LIQUIDITY_SUCCESS` code returned.
@@ -380,41 +381,42 @@ liquidity pool defined as `LiquidityPoolEntry`.
`WithdrawPoolLiquidityOp` operation withdraws funds from a liquidity pool
proportionally to the account stake size.
-- The node performs a lookup of a `LiquidityStakeEntry` by hash derived from the
- operation source account address, `poolType`, `assetA`, and `assetB`.
+- The node performs a lookup of a `LiquidityStakeEntry` by hash derived from
+ the operation source account address, `poolType`, `assetA`, and `assetB`.
- If corresponding `LiquidityStakeEntry` was not found,
`WITHDRAW_POOL_LIQUIDITY_NOT_FOUND` error is returned.
- Requested stake withdrawal size larger then `LiquidityStakeEntry` `stake`
- value yields `WITHDRAW_POOL_LIQUIDITY_INVALID_STAKE` error. If the
- requested `stake` equals zero, the entire account liquidity is withdrawn.
+ value yields `WITHDRAW_POOL_LIQUIDITY_INVALID_STAKE` error. If the requested
+ `stake` equals zero, the entire account liquidity is withdrawn.
- The node loads current state of the matching liquidity pool.
- An account has a right to withdraw liquidity stake equal to

The amount of tokens to withdraw for each asset is computed as
- )
- )
+ ![`a=floor(A*l)`]()
+ ![`b=floor(B*l)`]()
where
- - ***s*** - share to withdraw
- - ***S*** - total number of pool shares from `LiquidityPoolEntry`
- - ***A*** and ***B*** - current token amount of the asset A and asset B in
- the pool respectively
+ - **_s_** - share to withdraw
+ - **_S_** - total number of pool shares from `LiquidityPoolEntry`
+ - **_A_** and **_B_** - current token amount of the asset A and asset B in
+ the pool respectively
- Trustlines info loaded for `assetA` and `assetB`. If the source account does
- not have a trustline for one of the assets or the trustline is not authorized,
- `WITHDRAW_POOL_LIQUIDITY_NO_TRUSTLINE` error returned. If a trustline limit
- prevents the transfer, `WITHDRAW_POOL_LIQUIDITY_LINE_FULL` error returned.
-- `LiquidityStakeEntry` updated: `stake`-=***s***. If the remaining stake equals
- zero, `LiquidityStakeEntry` is removed, `numSubEntries` for the source account
- is decremented, and base reserve unlocked.
+ not have a trustline for one of the assets or the trustline is not
+ authorized, `WITHDRAW_POOL_LIQUIDITY_NO_TRUSTLINE` error returned. If a
+ trustline limit prevents the transfer, `WITHDRAW_POOL_LIQUIDITY_LINE_FULL`
+ error returned.
+- `LiquidityStakeEntry` updated: `stake`-=**_s_**. If the remaining stake
+ equals zero, `LiquidityStakeEntry` is removed, `numSubEntries` for the source
+ account is decremented, and base reserve unlocked.
- Effective pool fee recalculated using a weighted average formula:

where
- - ***Fₙ*** - new pool fee
- - ***S*** - total stakes currently in the pool (from `LiquidityPoolEntry`)
- - ***F*** - current pool fee
- - ***s*** - stake to withdraw
- - ***f*** - pool fee proposed by the account (from `LiquidityStakeEntry`)
-- `LiquidityPoolEntry` updated: `amountA`-=***a***, `amountB`-=***b***,
- `stakes`-=***s***, fee=***Fₙ***. If the remaining pool stake equals zero,
+ - **_Fₙ_** - new pool fee
+ - **_S_** - total stakes currently in the pool (from `LiquidityPoolEntry`)
+ - **_F_** - current pool fee
+ - **_s_** - stake to withdraw
+ - **_f_** - pool fee proposed by the account (from `LiquidityStakeEntry`)
+- `LiquidityPoolEntry` updated: `amountA`-=**_a_**, `amountB`-=**_b_**,
+ `stakes`-=**_s_**, fee=**_Fₙ_**. If the remaining pool stake equals zero,
`LiquidityPoolEntry` is removed.
- Funds transferred to the source account balances.
- `WITHDRAW_POOL_LIQUIDITY_SUCCESS` code returned.
@@ -431,52 +433,52 @@ liquidity pools for the traded asset pair, fetches available cross orders
orders.
On every step, it checks whether the next maker order crosses the price of the
-taker order. Before the maker order execution the engine estimates the number of
-tokens that can be traded on each liquidity pool for the same trading pair up to
-the price of the current maker order.
+taker order. Before the maker order execution the engine estimates the number
+of tokens that can be traded on each liquidity pool for the same trading pair
+up to the price of the current maker order.
Amounts to be traded against the pool can be calculated using the following set
of formulas:
- Price-bound swap (estimate based on the target price)
- If *new price* > *current price*:
- %5Ccdot(1%2Bf)))
- )
- If *new price* < *current price*:
- %5Ccdot(1%2Bf)))
- %5Ccdot%20P-A))
+ If _new price_ > _current price_:
+ ![`a=ceil((√(A*B*P)-A)*(1+f))`]()
+ ![`b=ceil((A+a)/P-B)`]()
+ If _new price_ < _current price_:
+ ![`b=ceil((√(A*B/P)-B)*(1+f))`]()
+ ![`a=ceil((B+b)*P-A)`]()
- Amount-bound buy swap (estimate based on the target token amount)
For a given token A target amount:
- %7D%7BA-a%7D))
+ ![`b=ceil(a*B*(1+f)/(A-a))`]()
For a given token B target amount:
- %7D%7BB-b%7D))
+ ![`a=ceil(b*A*(1+f)/(B-b))`]()
- Amount-bound sell swap (estimate based on the source token amount)
For a given token A source amount:
- %7D%7BA%2Ba%7D))
+ ![`b=floor(-a*B*(1-f)/(A+a))`]()
For a given token B source amount:
- %7D%7BB%2Bb%7D))
+ ![`a=floor(-b*A*(1-f)/(B+b))`]()
where
- - ***A*** and ***B*** - current amounts of asset A and asset B in the pool
- - ***a*** and ***b*** – effective amounts of asset A and asset B to swap
- - ***f*** - trading pool fee
- - ***P*** - maximum price (equals currently processed maker order price)
+ - **_A_** and **_B_** - current amounts of asset A and asset B in the pool
+ - **_a_** and **_b_** – effective amounts of asset A and asset B to swap
+ - **_f_** - trading pool fee
+ - **_P_** - maximum price (equals currently processed maker order price)
-If ***a***>0 and ***b***>0, the corresponding amount of purchased tokens is
+If **_a_**>0 and **_b_**>0, the corresponding amount of purchased tokens is
deducted from the pool and added to the variable accumulating the total amount
traded on the pool. Another variable accumulates matching sold asset amount.
After that, the current maker order itself is matched to the remaining taker
-order amount, and so on, up to the point when a taker order is executed in full.
-If a manage offer operation is being processed and the outstanding amount can't
-be executed on the orderbook nor the pool, a new maker order with the remaining
-amount is created on the orderbook.
+order amount, and so on, up to the point when a taker order is executed in
+full. If a manage offer operation is being processed and the outstanding amount
+can't be executed on the orderbook nor the pool, a new maker order with the
+remaining amount is created on the orderbook.
Pool settlement occurs – traded tokens are deducted from the pool and added to
-the account balance, and matching amount of asset B transferred from the account
-balance to the pool.
+the account balance, and matching amount of asset B transferred from the
+account balance to the pool.
-The same behavior applies to path payment operations. This CAP doesn't imply any
-changes of current order matching or execution process for DEX offers.
+The same behavior applies to path payment operations. This CAP doesn't imply
+any changes of current order matching or execution process for DEX offers.
A trade against a pool generates a `ClaimOfferAtom` result with `offerID` equal
to `-poolType` and empty `sellerID`.
@@ -501,12 +503,12 @@ several shortcomings:
reality, it results in significantly larger codebase changes (more operations
and more use-cases to handle), a lot of work on the Horizon side, and much
more effort from the ecosystem developers.
-- The trading process becomes confusing for regular users. What's the difference
- between an order and a swap? How to get the best rate?
+- The trading process becomes confusing for regular users. What's the
+ difference between an order and a swap? How to get the best rate?
Of course, sooner or later wallets and exchange interfaces should come to the
rescue, providing hints in the interface and maybe even aggregating
- information across the liquidity pool and orderbook for a given assets pair to
- ensure the best possible exchange price. That's feasible, but not very
+ information across the liquidity pool and orderbook for a given assets pair
+ to ensure the best possible exchange price. That's feasible, but not very
user-friendly and may lead to confusion.
- Fragmented liquidity means that for any trade or path payment larger than
several dollars a wallet needs to perform several trades (against the
@@ -533,12 +535,12 @@ Advantages of the proposed approach:
- Users always receive the best possible price as the trade is executed against
the entire liquidity available for the certain trading pair.
- The orderbook and liquidity pool always remain in the balanced state which
- means there are no arbitrage opportunities between the pool and orderbook. The
- trading engine automatically conducts arbitrage rebalancing on each trade
+ means there are no arbitrage opportunities between the pool and orderbook.
+ The trading engine automatically conducts arbitrage rebalancing on each trade
under the hood, eliminating the need for external arbitrage actors.
- There are no reasonable use-cases that require trading exclusively on the
- pool. Price manipulations is probably the only applicable example of pool-only
- swaps.
+ pool. Price manipulations is probably the only applicable example of
+ pool-only swaps.
- Smaller attack surface since there is no way to trade on pool directly. This
also automatically prevents attacks based on the imbalanced oracle state and
significantly increases the cost of intentional price manipulations as the
@@ -558,8 +560,8 @@ Advantages of the proposed approach:
`ClaimOfferAtom` reused for the pool trades has an empty seller account
reference since the trade is executed against the pool. In order to distinguish
between specific pools which participated in the trade, the `offerID` field
-contains a negated `poolType` value (to avoid potential conflict with real offer
-IDs).
+contains a negated `poolType` value (to avoid potential conflict with real
+offer IDs).
This allows us to use the same data contract for all trades regardless of the
pool/orderbook type. However, introducing a separate contract specifically for
@@ -576,9 +578,9 @@ orderbook while illiquid or very volatile trading pairs require higher fees to
counter possible impermanent loss or low yield scenarios.
To provide the required pool fee flexibility without inventing complex voting
-mechanics, the voting prerogative delegated to the liquidity providers which can
-set the `fee` parameter in `DepositPoolLiquidityOp` operation, so the users can
-vote for the pool fee with their liquidity stakes. The effective pool yield
+mechanics, the voting prerogative delegated to the liquidity providers which
+can set the `fee` parameter in `DepositPoolLiquidityOp` operation, so the users
+can vote for the pool fee with their liquidity stakes. The effective pool yield
represents a weighted average of proposed liquidity stake fees. This way
accounts with larger stakes have more voting power which is fair given that
every liquidity provider risks proportional to the deposited stake.
@@ -603,17 +605,18 @@ the alphabetical order upon insertion. The comparator function takes into
account the asset type, asset code, and asset issuer address respectively.
Ledger key for the `LiquidityStakeEntry` is a hash derived from the account
-address+`poolType`+`assetA`+`assetB` fields combination to optimize the storage.
+address+`poolType`+`assetA`+`assetB` fields combination to optimize the
+storage.
#### No Assumptions Regarding Future Pool Types
This CAP makes no assumptions about possible pool functionality extensions or
-the introduction of other pool types in the future. With high probability, those
-potential new AMM architectures will require different implementation
-approaches, separate deposit/withdrawal operations, and additional parameters in
-ledger entries. It's impossible to predict requirements for yet-to-be-invented
-concepts, so designing infrastructure with regard to unknown forthcoming changes
-looks impractical.
+the introduction of other pool types in the future. With high probability,
+those potential new AMM architectures will require different implementation
+approaches, separate deposit/withdrawal operations, and additional parameters
+in ledger entries. It's impossible to predict requirements for
+yet-to-be-invented concepts, so designing infrastructure with regard to unknown
+forthcoming changes looks impractical.
As most of the existing approaches that can be ported to Stellar do not provide
significant benefits compared to the simple constant product invariant (or
@@ -642,8 +645,8 @@ may be significantly slower than 64-bit arithmetics.
Every `LiquidityPoolEntry` requires storage space on the ledger but unlike
`LiquidityStakeEntry` it is not backed by the account base reserve. This
-behavior can't be directly used to perform a resource execution attack as
-every `LiquidityPoolEntry` requires at least one `LiquidityStakeEntry`.
+behavior can't be directly used to perform a resource execution attack as every
+`LiquidityPoolEntry` requires at least one `LiquidityStakeEntry`.
## Security Concerns
diff --git a/core/cap-0038.md b/core/cap-0038.md
index ea68042bc..e979bc98f 100644
--- a/core/cap-0038.md
+++ b/core/cap-0038.md
@@ -14,16 +14,19 @@ Protocol version: 18
```
## Simple Summary
+
Automated market makers provide a simple way to provide liquidity and exchange
assets.
## Working Group
+
This proposal was initially authored by Jonathan Jove based on the results of
numerous discussions. The working group includes the author of a similar
proposal (OrbitLens), people with knowledge of market making (Nikhil Saraf and
Phil Meng), and a maintainer of Horizon and its SDKs (Tamir Sen).
## Motivation
+
Projects such as Uniswap have shown that automated market makers are effective
at providing easy-to-access liquidity at scale. The simplicity and
non-interactivity of liquidity pools can attract large amounts of capital and
@@ -31,17 +34,19 @@ enable high volumes of trading. We believe adding automated market makers to
Stellar will improve overall liquidity on the network.
### Goals Alignment
+
This proposal is aligned with several Stellar Network Goals, among them:
- The Stellar Network should run at scale and at low cost to all participants
-of the network
+ of the network
- The Stellar Network should enable cross-border payments, i.e. payments via
-exchange of assets, throughout the globe, enabling users to make payments
-between assets in a manner that is fast, cheap, and highly usable.
+ exchange of assets, throughout the globe, enabling users to make payments
+ between assets in a manner that is fast, cheap, and highly usable.
- The Stellar Network should make it easy for developers of Stellar projects to
-create highly usable products.
+ create highly usable products.
## Abstract
+
This proposal introduces automated market makers to the Stellar network.
`LiquidityPoolEntry` is introduced as a new type of `LedgerEntry` which stores
the state of a liquidity pool. New operations, `LiquidityPoolDepositOp` and
@@ -56,7 +61,10 @@ exchanging assets with the order book and liquidity pools.
## Specification
### XDR changes
-This patch of XDR changes is based on the XDR files in commit (`a5e7028c04305c7b6f7d08c981e87bb9891b7364`) of stellar-core.
+
+This patch of XDR changes is based on the XDR files in commit
+(`a5e7028c04305c7b6f7d08c981e87bb9891b7364`) of stellar-core.
+
```diff mddiffcheck.base=a5e7028c04305c7b6f7d08c981e87bb9891b7364
diff --git a/src/xdr/Stellar-ledger-entries.x b/src/xdr/Stellar-ledger-entries.x
index 0e7bc842..885cf2d4 100644
@@ -67,7 +75,7 @@ index 0e7bc842..885cf2d4 100644
typedef uint64 TimePoint;
typedef opaque DataValue<64>;
+typedef Hash PoolID; // SHA256(LiquidityPoolParameters)
-
+
// 1-4 alphanumeric characters right-padded with 0 bytes
typedef opaque AssetCode4[4];
@@ -25,7 +26,8 @@ enum AssetType
@@ -78,12 +86,12 @@ index 0e7bc842..885cf2d4 100644
+ ASSET_TYPE_CREDIT_ALPHANUM12 = 2,
+ ASSET_TYPE_POOL_SHARE = 3
};
-
+
union AssetCode switch (AssetType type)
@@ -39,24 +41,28 @@ case ASSET_TYPE_CREDIT_ALPHANUM12:
// add other asset types here in the future
};
-
+
+struct AlphaNum4
+{
+ AssetCode4 assetCode;
@@ -100,7 +108,7 @@ index 0e7bc842..885cf2d4 100644
{
case ASSET_TYPE_NATIVE: // Not credit
void;
-
+
case ASSET_TYPE_CREDIT_ALPHANUM4:
- struct
- {
@@ -108,7 +116,7 @@ index 0e7bc842..885cf2d4 100644
- AccountID issuer;
- } alphaNum4;
+ AlphaNum4 alphaNum4;
-
+
case ASSET_TYPE_CREDIT_ALPHANUM12:
- struct
- {
@@ -116,7 +124,7 @@ index 0e7bc842..885cf2d4 100644
- AccountID issuer;
- } alphaNum12;
+ AlphaNum12 alphaNum12;
-
+
// add other asset types here in the future
};
@@ -90,7 +96,8 @@ enum LedgerEntryType
@@ -127,12 +135,12 @@ index 0e7bc842..885cf2d4 100644
+ CLAIMABLE_BALANCE = 4,
+ LIQUIDITY_POOL = 5
};
-
+
struct Signer
@@ -214,12 +221,46 @@ const MASK_TRUSTLINE_FLAGS = 1;
const MASK_TRUSTLINE_FLAGS_V13 = 3;
const MASK_TRUSTLINE_FLAGS_V17 = 7;
-
+
+enum LiquidityPoolType
+{
+ LIQUIDITY_POOL_CONSTANT_PRODUCT = 0
@@ -177,7 +185,7 @@ index 0e7bc842..885cf2d4 100644
+ TrustLineAsset asset; // type of asset (with issuer)
+ int64 balance; // how much of this asset the user has.
+ // Asset defines the unit for this;
-
+
int64 limit; // balance cannot be above this
uint32 flags; // see TrustLineFlags
@@ -238,6 +279,8 @@ struct TrustLineEntry
@@ -192,7 +200,7 @@ index 0e7bc842..885cf2d4 100644
@@ -403,6 +446,33 @@ struct ClaimableBalanceEntry
ext;
};
-
+
+struct LiquidityPoolConstantProductParameters
+{
+ Asset assetA; // assetA < assetB
@@ -231,7 +239,7 @@ index 0e7bc842..885cf2d4 100644
+ LiquidityPoolEntry liquidityPool;
}
data;
-
+
@@ -457,7 +529,7 @@ case TRUSTLINE:
struct
{
@@ -239,7 +247,7 @@ index 0e7bc842..885cf2d4 100644
- Asset asset;
+ TrustLineAsset asset;
} trustLine;
-
+
case OFFER:
@@ -479,6 +551,12 @@ case CLAIMABLE_BALANCE:
{
@@ -252,7 +260,7 @@ index 0e7bc842..885cf2d4 100644
+ PoolID liquidityPoolID;
+ } liquidityPool;
};
-
+
// list of all envelope types used in the application
@@ -492,6 +570,7 @@ enum EnvelopeType
ENVELOPE_TYPE_AUTH = 3,
@@ -270,7 +278,7 @@ index a21c577a..84b84cbf 100644
@@ -47,6 +47,27 @@ struct StellarValue
ext;
};
-
+
+const MASK_LEDGER_HEADER_FLAGS = 0x7;
+
+enum LedgerHeaderFlags
@@ -312,7 +320,7 @@ index a21c577a..84b84cbf 100644
+ LEDGER_UPGRADE_BASE_RESERVE = 4,
+ LEDGER_UPGRADE_FLAGS = 5
};
-
+
union LedgerUpgrade switch (LedgerUpgradeType type)
@@ -111,6 +135,8 @@ case LEDGER_UPGRADE_MAX_TX_SET_SIZE:
uint32 newMaxTxSetSize; // update maxTxSetSize
@@ -321,7 +329,7 @@ index a21c577a..84b84cbf 100644
+case LEDGER_UPGRADE_FLAGS:
+ uint32 newFlags; // update flags
};
-
+
/* Entries used to define the bucket list */
diff --git a/src/xdr/Stellar-transaction.x b/src/xdr/Stellar-transaction.x
index 75f39eb4..f9d62a69 100644
@@ -330,7 +338,7 @@ index 75f39eb4..f9d62a69 100644
@@ -7,6 +7,12 @@
namespace stellar
{
-
+
+union LiquidityPoolParameters switch (LiquidityPoolType type)
+{
+case LIQUIDITY_POOL_CONSTANT_PRODUCT:
@@ -349,12 +357,12 @@ index 75f39eb4..f9d62a69 100644
+ LIQUIDITY_POOL_DEPOSIT = 22,
+ LIQUIDITY_POOL_WITHDRAW = 23
};
-
+
/* CreateAccount
@@ -212,6 +220,23 @@ struct SetOptionsOp
Signer* signer;
};
-
+
+union ChangeTrustAsset switch (AssetType type)
+{
+case ASSET_TYPE_NATIVE: // Not credit
@@ -373,7 +381,7 @@ index 75f39eb4..f9d62a69 100644
+};
+
/* Creates, updates or deletes a trust line
-
+
Threshold: med
@@ -221,7 +246,7 @@ struct SetOptionsOp
*/
@@ -381,13 +389,13 @@ index 75f39eb4..f9d62a69 100644
{
- Asset line;
+ ChangeTrustAsset line;
-
+
// if limit is set to 0, deletes the trust line
int64 limit;
@@ -409,6 +434,37 @@ struct SetTrustLineFlagsOp
uint32 setFlags; // which flags to set
};
-
+
+const LIQUIDITY_POOL_FEE_V18 = 30;
+
+/* Deposit assets into a liquidity pool
@@ -433,7 +441,7 @@ index 75f39eb4..f9d62a69 100644
}
body;
};
-
+
-union OperationID switch (EnvelopeType type)
+union HashIDPreimage switch (EnvelopeType type)
{
@@ -455,12 +463,12 @@ index 75f39eb4..f9d62a69 100644
+ Asset asset;
+ } revokeID;
};
-
+
enum MemoType
@@ -635,7 +704,33 @@ struct TransactionSignaturePayload
-
+
/* Operation Results section */
-
+
-/* This result is used when offers are taken during an operation */
+enum ClaimAtomType
+{
@@ -495,7 +503,7 @@ index 75f39eb4..f9d62a69 100644
@@ -651,6 +746,32 @@ struct ClaimOfferAtom
int64 amountBought;
};
-
+
+struct ClaimLiquidityAtom
+{
+ PoolID liquidityPoolID;
@@ -523,7 +531,7 @@ index 75f39eb4..f9d62a69 100644
+};
+
/******* CreateAccount Result ********/
-
+
enum CreateAccountResultCode
@@ -745,7 +866,7 @@ union PathPaymentStrictReceiveResult switch (
case PATH_PAYMENT_STRICT_RECEIVE_SUCCESS:
@@ -549,7 +557,7 @@ index 75f39eb4..f9d62a69 100644
// offers that got claimed while creating this offer
- ClaimOfferAtom offersClaimed<>;
+ ClaimAtom offersClaimed<>;
-
+
union switch (ManageOfferEffect effect)
{
@@ -933,7 +1054,10 @@ enum ChangeTrustResultCode
@@ -562,7 +570,7 @@ index 75f39eb4..f9d62a69 100644
+ CHANGE_TRUST_CANNOT_DELETE = -7, // Asset trustline is still referenced in a pool
+ CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES = -8 // Asset trustline is deauthorized
};
-
+
union ChangeTrustResult switch (ChangeTrustResultCode code)
@@ -956,7 +1080,9 @@ enum AllowTrustResultCode
// source account does not require trust
@@ -573,7 +581,7 @@ index 75f39eb4..f9d62a69 100644
+ ALLOW_TRUST_LOW_RESERVE = -6 // claimable balances can't be created
+ // on revoke due to low reserves
};
-
+
union AllowTrustResult switch (AllowTrustResultCode code)
@@ -1152,7 +1278,8 @@ enum RevokeSponsorshipResultCode
REVOKE_SPONSORSHIP_DOES_NOT_EXIST = -1,
@@ -583,7 +591,7 @@ index 75f39eb4..f9d62a69 100644
+ REVOKE_SPONSORSHIP_ONLY_TRANSFERABLE = -4,
+ REVOKE_SPONSORSHIP_MALFORMED = -5
};
-
+
union RevokeSponsorshipResult switch (RevokeSponsorshipResultCode code)
@@ -1218,7 +1345,9 @@ enum SetTrustLineFlagsResultCode
SET_TRUST_LINE_FLAGS_MALFORMED = -1,
@@ -594,12 +602,12 @@ index 75f39eb4..f9d62a69 100644
+ SET_TRUST_LINE_FLAGS_LOW_RESERVE = -5 // claimable balances can't be created
+ // on revoke due to low reserves
};
-
+
union SetTrustLineFlagsResult switch (SetTrustLineFlagsResultCode code)
@@ -1229,6 +1358,63 @@ default:
void;
};
-
+
+/******* LiquidityPoolDeposit Result ********/
+
+enum LiquidityPoolDepositResultCode
@@ -676,6 +684,7 @@ index 75f39eb4..f9d62a69 100644
### Semantics
#### LiquidityPoolDepositOp
+
`LiquidityPoolDepositOp` is the only way for an account to deposit funds into a
liquidity pool. The operation specifies a maximum amount to deposit for each
asset in the pool (ordered field-wise lexicographically among assets). The
@@ -697,11 +706,12 @@ specified by minPrice and maxPrice.
- `lpdo.maxPrice.n <= 0`
- `lpdo.maxPrice.d <= 0`
- `lpdo.minPrice.n * lpdo.maxPrice.d > lpdo.minPrice.d * lpdo.maxPrice.n` (this
-is equivalent to
-`lpdo.minPrice.n / lpdo.minPrice.d > lpdo.maxPrice.n / lpdo.maxPrice.d`)
+ is equivalent to
+ `lpdo.minPrice.n / lpdo.minPrice.d > lpdo.maxPrice.n / lpdo.maxPrice.d`)
+
+The process of applying `LiquidityPoolDepositOp lpdo` with source
+`sourceAccount` is
-The process of applying `LiquidityPoolDepositOp lpdo` with source `sourceAccount`
-is
```
tlPool = loadTrustLine(sourceAccount, lpdo.liquidityPoolID)
if !exists(tlPool)
@@ -772,12 +782,13 @@ Succeed with LIQUIDITY_POOL_DEPOSIT_SUCCESS
```
#### LiquidityPoolWithdrawOp
+
`LiquidityPoolWithdrawOp` is the only way for an account to withdraw funds from
a liquidity pool. The operation specifies an amount of pool shares to withdraw.
-Using that number of pool shares, it calculates amounts of each asset to withdraw
-with a maximum error (rounded against the depositor) of 1 stroop in each asset.
-Finally, it checks that the withdrawn amounts are at least those specified by
-minAmountA and minAmountB.
+Using that number of pool shares, it calculates amounts of each asset to
+withdraw with a maximum error (rounded against the depositor) of 1 stroop in
+each asset. Finally, it checks that the withdrawn amounts are at least those
+specified by minAmountA and minAmountB.
`LiquidityPoolWithdrawOp` will return `opNOT_SUPPORTED` during validation if
`(ledgerHeader.v1.flags & DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG) == DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG`
@@ -788,8 +799,9 @@ minAmountA and minAmountB.
- `lpwo.minAmountA < 0`
- `lpwo.minAmountB < 0`
-The process of applying `LiquidityPoolWithdrawOp lpwo` with source `sourceAccount`
-is
+The process of applying `LiquidityPoolWithdrawOp lpwo` with source
+`sourceAccount` is
+
```
tlPool = loadTrustLine(sourceAccount, lpwo.liquidityPoolID)
if !exists(tlPool)
@@ -832,11 +844,12 @@ Succeed with LIQUIDITY_POOL_WITHDRAW_SUCCESS
```
#### ChangeTrustOp
-`ChangeTrustOp` is extended to allow the creation, modification, and deletion of
-pool share trust lines. If a pool share trust line is the first one created for
+
+`ChangeTrustOp` is extended to allow the creation, modification, and deletion
+of pool share trust lines. If a pool share trust line is the first one created
+for the specified parameters, then the corresponding `LiquidityPoolEntry` will
+be created. Likewise, if a pool share trust line is the last one deleted for
the specified parameters, then the corresponding `LiquidityPoolEntry` will be
-created. Likewise, if a pool share trust line is the last one deleted for the
-specified parameters, then the corresponding `LiquidityPoolEntry` will be
deleted. To create a pool share trust line, you must have trust lines for each
of the constituent assets and those trust lines must at least be authorized to
maintain liabilities.
@@ -851,31 +864,34 @@ if `line.type() == ASSET_TYPE_POOL_SHARE` and
The behavior of `ChangeTrustOp` is changed for all trust line types
If `line.type() != ASSET_TYPE_POOL_SHARE` then
+
- If the asset trust line is being deleted but `liquidityPoolUseCount != 0`,
-return `CHANGE_TRUST_CANNOT_DELETE`.
+ return `CHANGE_TRUST_CANNOT_DELETE`.
If `line.type() == ASSET_TYPE_POOL_SHARE` and
+
- If pool share trust line does not exist (and therefore needs to be created)
- For each asset in the pool where the source account is not the issuer
- If the trust line for the asset is missing, return
- `CHANGE_TRUST_TRUST_LINE_MISSING`.
- - If the trust line for the asset is not authorized to maintain liabilities,
- return `CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES`.
+ `CHANGE_TRUST_TRUST_LINE_MISSING`.
+ - If the trust line for the asset is not authorized to maintain
+ liabilities, return `CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES`.
- The pool share trust line `tl` has
- `tl.asset.liquidityPoolID() == SHA256(line.liquidityPool())`
+ `tl.asset.liquidityPoolID() == SHA256(line.liquidityPool())`
- No flags are set on the pool share trust line.
- If no liquidity pool with `liquidityPoolID == SHA256(line.liquidityPool())`
- exists, then that liquidity pool is created.
+ exists, then that liquidity pool is created.
- The pool share trust line should count as two subentries (and therefore
require two base reserves)
- `poolSharesTrustLineCount` is incremented on the corresponding liquidity
pool and `liquidityPoolUseCount` is incremented on each asset trust line.
Note that `poolSharesTrustLineCount` is counting the number of **pool share
- trust lines** tied to a pool, so this will get incremented even if the source
- account is the issuer of both assets in the pool. `liquidityPoolUseCount` on
- the other hand counts the number of pools a given **asset trust line** is used in,
- so this is irrelevant if the source account is the issuer. The issuer doesn't have
- a trust line to assets it has issued, so this step is skipped in that case.
+ trust lines** tied to a pool, so this will get incremented even if the
+ source account is the issuer of both assets in the pool.
+ `liquidityPoolUseCount` on the other hand counts the number of pools a
+ given **asset trust line** is used in, so this is irrelevant if the source
+ account is the issuer. The issuer doesn't have a trust line to assets it
+ has issued, so this step is skipped in that case.
- If pool share trust line is being deleted
- `poolSharesTrustLineCount` is decremented on the corresponding liquidity
@@ -884,19 +900,22 @@ If `line.type() == ASSET_TYPE_POOL_SHARE` and
0, then that liquidity pool is erased.
#### SetTrustLineFlagsOp and AllowTrustOp
+
The authorization revocation behavior of `SetTrustLineFlagsOp` and
`AllowTrustOp` is extended to force a redeem of pool shares if any of the
referenced asset trust lines get their authorization revoked. For each redeemed
-pool share trust line, a claimable balance will be created for every constituent
-asset if there is a balance being withdrawn and the claimant is not the issuer.
-This means that for a redeemed pool share trust line, there can be zero, one, or
-two claimable balances created. These claimable balances will be sponsored by the
-sponsor of the pool share trust line, and will be unconditionally claimable by the
-owner of the pool share trust line.
+pool share trust line, a claimable balance will be created for every
+constituent asset if there is a balance being withdrawn and the claimant is not
+the issuer. This means that for a redeemed pool share trust line, there can be
+zero, one, or two claimable balances created. These claimable balances will be
+sponsored by the sponsor of the pool share trust line, and will be
+unconditionally claimable by the owner of the pool share trust line.
-The validity conditions for `SetTrustLineFlagsOp` and `AllowTrustOp` are unchanged.
+The validity conditions for `SetTrustLineFlagsOp` and `AllowTrustOp` are
+unchanged.
The process of applying `SetTrustLineFlagsOp` and `AllowTrustOp`
+
```
tl = loadTrustLine(trustor, asset)
@@ -966,31 +985,32 @@ if isAuthorizedToMaintainLiabilities(tl) && !isAuthorizedToMaintainLiabilities(e
```
#### PathPaymentStrictSendOp and PathPaymentStrictReceiveOp
+
`PathPaymentStrictSendOp` and `PathPaymentStrictReceiveOp` are the only ways to
exchange with a liquidity pool. The operation does not allow users to determine
their own routing, rather the operation routes the exchange to the single venue
-("venue" in this context means either the order book or the liquidity pool) that
-yields the best price. In all respects, the behavior with liquidity pools is
-analogous to the behavior without liquidity pools.
+("venue" in this context means either the order book or the liquidity pool)
+that yields the best price. In all respects, the behavior with liquidity pools
+is analogous to the behavior without liquidity pools.
As noted, for each step in the path the exchange will be routed to the order
book or liquidity pool that yields the best price for the entire exchange. The
behavior is changed such that:
1. The price of the exchange is computed for the liquidity pool, or it is
-recorded that the exchange is not possible. There are multiple reasons that the
-exchange might not be possible, including insufficient liquidity or INT64_MAX
-overflow of either pool reserve. Note that exceeding limits set by the operation
-does not qualify as "not possible" in this context.
+ recorded that the exchange is not possible. There are multiple reasons that
+ the exchange might not be possible, including insufficient liquidity or
+ INT64_MAX overflow of either pool reserve. Note that exceeding limits set by
+ the operation does not qualify as "not possible" in this context.
2. The price of the exchange is computed for the order book, or it is recorded
-that the exchange is not possible. There are multiple reasons that the exchange
-might not be possible, including insufficient liquidity or a self trade. Note
-that exceeding limits set by the operation does not qualify as "not possible" in
-this context.
+ that the exchange is not possible. There are multiple reasons that the
+ exchange might not be possible, including insufficient liquidity or a self
+ trade. Note that exceeding limits set by the operation does not qualify as
+ "not possible" in this context.
3. If both exchanges are possible, then choose the one which produces the best
-price. In the event that both prices are equal, choose the liquidity pool.
+ price. In the event that both prices are equal, choose the liquidity pool.
4. If the exchange is not possible on the liquidity pool, then return whatever
-result was produced when exchanging with the order book.
+ result was produced when exchanging with the order book.
As an example of the above, consider a path payment strict send with path
`A -> B -> C:`
@@ -1005,9 +1025,9 @@ It is important to recognize that this happens on each step in the path, so
"return whatever result was produced when exchanging with the order book" is a
local statement _not_ a global statement. It is definitely possible that a path
payment will produce a different result then it would have in the absence of
-liquidity pools. One simple example of this occurs on a one step path when there
-is no liquidity on the order book, but there is sufficient liquidity on the
-liquidity pool to perform the exchange at a bad price. In the absence of
+liquidity pools. One simple example of this occurs on a one step path when
+there is no liquidity on the order book, but there is sufficient liquidity on
+the liquidity pool to perform the exchange at a bad price. In the absence of
liquidity pools the result would have been too few offers, but with liquidity
pools the result would have been under destination minimum or over source
maximum.
@@ -1024,28 +1044,37 @@ such a liquidity pool is `(X + x - Fx) (Y - y) >= XY` where
There are two important cases to handle: if we know the amount received, and if
we know the amount disbursed. If we know the amount received `x`, then the
invariant can be rearranged to yield
+
```
y <= Y - XY / (X + x - Fx)
= (1 - F) Yx / (X + x - Fx)
```
+
so the integrality requirement produces
+
```
y = floor[(1 - F) Yx / (X + x - Fx)] .
```
+
If we know the amount disbursed `y`, then the invariant can be rearranged to
yield
+
```
x >= (XY / (Y - y) - X) / (1 - F)
= Xy / (Y - y) / (1 - F)
```
+
so the integrality requirement produces
+
```
x = ceil[Xy / (Y - y) / (1 - F)] .
```
+
In this proposal, `F = 0.003` which corresponds to 0.3% (this is encoded by
`LIQUIDITY_POOL_FEE_V18`).
#### RevokeSponsorshipOp
+
This proposal adds a new result code `REVOKE_SPONSORSHIP_MALFORMED` for
`RevokeSponsorshipOp`. This result code will be returned on validation if
`RevokeSponsorshipOp.ledgerKey().type() == LIQUIDITY_POOL`. Additionally, all
@@ -1053,24 +1082,29 @@ existing validation failures will now return `REVOKE_SPONSORSHIP_MALFORMED` for
consistency.
#### Ledger Header Flags
+
This proposal also adds a `LedgerHeaderExtensionV1` that contains flags for
validators to vote on using a new `LedgerUpgradeType` `LEDGER_UPGRADE_FLAGS`.
-Three different flags can be set (enforced by
-`MASK_LEDGERHEADER_FLAGS`), which are -
-
-- `DISABLE_LIQUIDITY_POOL_TRADING_FLAG`: disable trading against liquidity pools
-- `DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG`: disable depositing into liquidity pools
-- `DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG`: disable withdrawing from liquidity pools
-
-This will allow validators to disable parts of this CAP in the
-event that unexpected behavior is encountered. These flags can only be set if
-validators vote for them, and they should only be used in case of emergency. The
-ability to disable these pool related features is only temporary, and will be
-removed in the future.
+Three different flags can be set (enforced by `MASK_LEDGERHEADER_FLAGS`), which
+are -
+
+- `DISABLE_LIQUIDITY_POOL_TRADING_FLAG`: disable trading against liquidity
+ pools
+- `DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG`: disable depositing into liquidity
+ pools
+- `DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG`: disable withdrawing from liquidity
+ pools
+
+This will allow validators to disable parts of this CAP in the event that
+unexpected behavior is encountered. These flags can only be set if validators
+vote for them, and they should only be used in case of emergency. The ability
+to disable these pool related features is only temporary, and will be removed
+in the future.
## Design Rationale
### Erasing the Liquidity Pool
+
Unused liquidity pools are erased automatically. An unused liquidity pool is
characterized by the property that no account has a trust line for the
corresponding pool shares. The implementation of `LiquidityPoolWithdrawOp` must
@@ -1078,6 +1112,7 @@ guarantee that the liquidity pool has no reserves if no account owns shares in
the liquidity pool, in order to avoid destroying assets.
### No LiquidityPoolEntry Reserve
+
This proposal does not require a reserve for a `LiquidityPoolEntry`. This is
justified by the fact that a `LiquidityPoolEntry` cannot exist without the
existence of a `TrustLineEntry` which can hold the pool share. The
@@ -1092,13 +1127,14 @@ account that creates a pool to be merged if it can find another account to
assume the sponsorship.
### TrustLineEntry with asset of type ASSET_TYPE_POOL_SHARE takes two base reserves
-This proposal uses Claimable Balances to send back an asset when a redemption is
-forced due to auth revocation. Instead of making the issuer put up the reserve,
-we would like to have the owner of the asset put up the reserve. This is ideal
-for a few reasons. First, the claimant of the claimable balance now has an
-additional incentive to claim the balance, and second, the issuer will not have
-to worry about potentially putting up many reserves in the case where many pool
-trust lines need to be redeemed on a revocation.
+
+This proposal uses Claimable Balances to send back an asset when a redemption
+is forced due to auth revocation. Instead of making the issuer put up the
+reserve, we would like to have the owner of the asset put up the reserve. This
+is ideal for a few reasons. First, the claimant of the claimable balance now
+has an additional incentive to claim the balance, and second, the issuer will
+not have to worry about potentially putting up many reserves in the case where
+many pool trust lines need to be redeemed on a revocation.
So how do we make the owner of the asset put up the reserve for the Claimable
Balance? We require pool trust lines to require the same number of reserves as
@@ -1108,48 +1144,58 @@ the trust line is deleted (freeing up two reserves), and then two sponsored
Claimable Balances are created.
But what if the owner account is already sponsoring at least `UINT32_MAX - 1`
-entries? They won't be able to sponsor those claimable balances and the revocation
-would fail. For this reason, we should limit `numSponsoring + numSubentries` to
-`UINT32_MAX`, guaranteeing that an account can always sponsor a claimable
-balance for every subentry that gets removed.
+entries? They won't be able to sponsor those claimable balances and the
+revocation would fail. For this reason, we should limit
+`numSponsoring + numSubentries` to `UINT32_MAX`, guaranteeing that an account
+can always sponsor a claimable balance for every subentry that gets removed.
### Claimable Balance is not created on authorization revocation if the claimant is the issuer
-Let's say Account A has a pool share trust line using Asset b1 and Asset b2. Account A
-is the issuer of Asset b1, but not b2. If A has it's authorization for b2 pulled then a redeem is
-forced in the liquidity pool and a Claimable Balance can be created for b2. But should one be created
-for b1? Account A would end up claiming the b1 balance, which would just result in the balance getting
-burned because A is the issuer of b1. We could accomplish the same step without failure by not creating
-the Claimable Balance in the first place and make this scenario simpler for the issuer.
+
+Let's say Account A has a pool share trust line using Asset b1 and Asset b2.
+Account A is the issuer of Asset b1, but not b2. If A has its authorization
+for b2 pulled then a redeem is forced in the liquidity pool and a Claimable
+Balance can be created for b2. But should one be created for b1? Account A
+would end up claiming the b1 balance, which would just result in the balance
+getting burned because A is the issuer of b1. We could accomplish the same step
+without failure by not creating the Claimable Balance in the first place and
+make this scenario simpler for the issuer.
### Authorization revocation of an asset trust line in a pool can fail
-Here are the possible scenarios when a trust line in a pool has it's authorization revoked.
-Remember that the pool share trust line is deleted to free up reserves for two claimable balances -
+
+Here are the possible scenarios when a trust line in a pool has its
+authorization revoked. Remember that the pool share trust line is deleted to
+free up reserves for two claimable balances -
1. Account A has a pool share trust line.
- On revoke, claimable balances are sponsored by A. Guaranteed to succeed.
2. Account A has a pool share trust line, but the trust line is sponsored by B.
- On revoke, claimable balances are sponsored by B. Guaranteed to succeed.
-3. Account A has a pool share trust line, the trust line is sponsored by B, but B is in the middle
-of a sponsorship sandwich where its entries will be sponsored by C.
- - On revoke, claimable balances (if any need to be created) are sponsored by C. This can fail
- if C does not have enough available balance to sponsor the claimable balances or if
- `numSponsoring + numSubentries + numClaimableBalancesToSponsor > UINT32_MAX`. The issuer can just
- submit the revoke again outside the sandwich for this to succeed.
- - This still works if C=A because claimable balances aren't subentries.
-4. Account A has a pool share trust line, and is in the middle of a sponsorship sandwich
-where its entries will be sponsored by C.
- - On revoke, claimable balances (if any need to be created) are sponsored by C. This can fail
- if C does not have enough available balance to sponsor the claimable balances or if
- `numSponsoring + numSubentries + numClaimableBalancesToSponsor > UINT32_MAX`. The issuer can just
- submit the revoke again outside the sandwich for this to succeed.
+3. Account A has a pool share trust line, the trust line is sponsored by B, but
+   B is in the middle of a sponsorship sandwich where its entries will be
+   sponsored by C.
+   - On revoke, claimable balances (if any need to be created) are sponsored
+     by C. This can fail if C does not have enough available balance to
+     sponsor the claimable balances or if
+     `numSponsoring + numSubentries + numClaimableBalancesToSponsor > UINT32_MAX`.
+     The issuer can just submit the revoke again outside the sandwich for this to succeed.
+   - This still works if C=A because claimable balances aren't subentries.
+4. Account A has a pool share trust line, and is in the middle of a sponsorship
+ sandwich where its entries will be sponsored by C.
+ - On revoke, claimable balances (if any need to be created) are sponsored by
+ C. This can fail if C does not have enough available balance to sponsor
+ the claimable balances or if
+ `numSponsoring + numSubentries + numClaimableBalancesToSponsor > UINT32_MAX`.
+ The issuer can just submit the revoke again outside the sandwich for this
+ to succeed.
For the failure cases, you can see that the account that owns/sponsors the pool
-share trust line is in the middle of a sponsorship sandwich. It is actually
-up to the issuer if the revoke is done in the middle of a sponsorship sandwich,
-so the issuer could always just submit the revoke with the sponsorship sandwich to make
-sure the revoke succeeds.
+share trust line is in the middle of a sponsorship sandwich. It is actually up
+to the issuer if the revoke is done in the middle of a sponsorship sandwich, so
+the issuer could always just submit the revoke with the sponsorship sandwich to
+make sure the revoke succeeds.
### Liquidity Pools Support Arbitrary Asset Pairs
+
Some implementations of liquidity pools, such as Uniswap V1, enforced the
requirement that one of the constituent assets was fixed. More recent
implementations, such as Uniswap V2, have generally removed this constraint.
@@ -1157,21 +1203,24 @@ implementations, such as Uniswap V2, have generally removed this constraint.
This proposal allows complete flexibility in the constituent assets of a
liquidity pool. We believe that this enables liquidity to be provided in the
manner that is most efficient. For example, providing liquidity between two
-stablecoins with the same underlying asset can be relatively low risk (assuming both
-are creditable). But if instead liquidity had to be provided against some fixed
-third asset, then the liquidity provider would be subject to impermanent loss in
-both liquidity pools.
+stablecoins with the same underlying asset can be relatively low risk (assuming
+both are creditable). But if instead liquidity had to be provided against some
+fixed third asset, then the liquidity provider would be subject to impermanent
+loss in both liquidity pools.
### Price Bounds for LiquidityPoolDepositOp
+
Fix assets X and Y. Suppose that `p > 0` is the value of Y in terms of X. A
portfolio consisting of `x` of X and `y` of Y has value `x + py` in terms of X.
Fix a real-valued differentiable function `f` of two real-valued variables such
that for all `p > 0` the system of equations
+
```
0 = f(x,y)
df/dy = p df/dx
```
+
has a unique solution with `x,y > 0` and `x' + py' >= x + py` for all `(x',y')`
satisfying `x',y' > 0` and `f(x',y') = 0` that are sufficiently near to
`(x,y)`. Consider a liquidity pool where the reserves `x` of X and `y` of `Y`
@@ -1179,11 +1228,13 @@ are constrained to satisfy `f(x,y) = 0`. Then the value of the reserves can be
minimized by the method of Lagrange multipliers. Let `z` be a Lagrange
multiplier and define the Lagrangian `L(x,y,z) = x + py - z f(x,y)`. This
yields the system of equations
+
```
0 = dL/dx = 1 - z df/dx
0 = dL/dy = p - z df/dy
0 = dL/dz = -f(x,y) .
```
+
`z` can be eliminated by combining the first two equations, which produces
`df/dy = p df/dx`. Following from the definition of `f`, there exist unique
`x,y > 0` that satisfy these equations. Furthermore, `(x,y)` is a local minima
@@ -1201,34 +1252,40 @@ prevent a vulnerability.
It is easy to see that the constant product invariant satisfies the above
conditions. Let `f(x,y) = xy - k` for some `k > 0`. For all `p > 0` the system
of equations
+
```
0 = xy - k
x = py
```
+
has the unique solution `y = sqrt(k/p), x = sqrt(kp)` where `x,y > 0`. Now note
that for any `(x',y')` satisfying `x',y' > 0` and `f(x',y') = 0`, the AM-GM
inequality implies
+
```
x' + py' = x' + kp/x' >= 2 sqrt(kp) = x + py .
```
### Price Bounds for LiquidityPoolWithdrawOp
-The corrolary to the above results "Price Bounds for LiquidityPoolDepositOp"
-is that an attacker cannot profit at the expense of a withdrawer, because moving
+
+The corollary to the above results "Price Bounds for LiquidityPoolDepositOp" is
+that an attacker cannot profit at the expense of a withdrawer, because moving
the pool from its fair price actually makes the pool shares more valuable.
-But the above analysis only applies if you ignore rounding. With rounding, it is
-always possible that you receive less than you expect. It is even possible that
-you receive 0 of one asset. Therefore, we must include minimum withdrawal
+But the above analysis only applies if you ignore rounding. With rounding, it
+is always possible that you receive less than you expect. It is even possible
+that you receive 0 of one asset. Therefore, we must include minimum withdrawal
amounts.
### LiquidityPoolWithdrawOp Specifies a Withdrawal Amount
+
In some jurisdictions, every operation on a blockchain is considered a taxable
event. This means that the process of withdrawing all funds in order to deposit
a new amount could have extremely adverse tax consequences. Allowing arbitrary
withdrawal amounts avoids this issue with little extra complexity.
### Store Pool Shares in Trust Lines
+
This proposal stores pool shares in trust lines, without allowing pool shares
to be used in any operation except `LiquidityPoolWithdrawOp` and `ChangeTrust`.
This means that pool shares already have any associated data that could be
@@ -1236,21 +1293,24 @@ necessary to make them transferable or control authorization, but we do not
enable these features in this proposal.
### Pool Shares are not Transferable
+
There are good reasons that pool shares should be transferable, most notably
-that this would facilitate using them as collateral in lending. Stellar has very
-limited support for lending, so this reason is not sufficient to justify the
-effort of supporting transferability at this point. This proposal is designed
-such that transferability could easily be added in a future protocol version.
+that this would facilitate using them as collateral in lending. Stellar has
+very limited support for lending, so this reason is not sufficient to justify
+the effort of supporting transferability at this point. This proposal is
+designed such that transferability could easily be added in a future protocol
+version.
One of the decisions that will need to be made is what should happen to the
authorization state of a pool trustline if one of its corresponding asset
-trustlines has authorization downgraded. We could either always check the
-asset trustline's authorization when checking the pool trustline's authorization,
-or automatically update pool trustlines when the asset trustline changes. We need
-to make sure a pool trustline cannot be transferred to and redeemed by an account
-that has a deauthorized trustline to one of the assets in the pool.
+trustlines has authorization downgraded. We could either always check the asset
+trustline's authorization when checking the pool trustline's authorization, or
+automatically update pool trustlines when the asset trustline changes. We need
+to make sure a pool trustline cannot be transferred to and redeemed by an
+account that has a deauthorized trustline to one of the assets in the pool.
### Trust Lines for Pool Shares do not have any flags set
+
`ChangeTrustOp` creates trust lines for pool shares with no flags set. Right
now, pool shares aren't transferable so the set of possible interactions is
limited. `LiquidityPoolDepositOp` should be able to mint pool shares if the
@@ -1268,38 +1328,42 @@ the actual authorization state of the pool trust line from the asset trust
lines.
### Pool withdrawals are allowed when asset trust lines have AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG set
+
It would be unfair to lock an account's funds in a Liquidity Pool when they no
longer want to be a part of one. It's currently possible for an account to pull
-offers in the `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state, so it makes sense
-to treat pool shares the same. An account in this state can withdraw from a pool,
-but will not be able to do anything else with those funds since operations like
-payments check authorization.
+offers in the `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` state, so it makes
+sense to treat pool shares the same. An account in this state can withdraw from
+a pool, but will not be able to do anything else with those funds since
+operations like payments check authorization.
### Clawback assets from a pool
+
There are no operations that clawback directly from a pool, but the same
results can be achieved by using `SetTrustlineFlagsOp` or `AllowTrustOp`. The
issuer can deauthorize an asset trust line, which will redeem the all pool
-trust lines using that asset and account back to the owner account if
-possible, and if not, into a claimable balance. The issuer can then use
-`ClawbackOp` or `ClawbackClaimableBalanceOp` to clawback the assets.
+trust lines using that asset and account back to the owner account if possible,
+and if not, into a claimable balance. The issuer can then use `ClawbackOp` or
+`ClawbackClaimableBalanceOp` to clawback the assets.
### Alternative authorization revocation solution
-The current proposal forces a redemption of all referenced pool trust lines when an
-asset trust line has its authorization revoked. There is an alternative to this
-solution. We could instead require the issuer to revoke authorization on
-individual pool trust lines to force a redemption. This approach will require the
-issuer to look up all pool trust lines for an asset trust line to perform the
-revoke. It would also add an additional step when regulating assets for the
-issuer, so we would need to add an opt in flag for liquidity pools so issuers
-are aware of this.
+
+The current proposal forces a redemption of all referenced pool trust lines
+when an asset trust line has its authorization revoked. There is an alternative
+to this solution. We could instead require the issuer to revoke authorization
+on individual pool trust lines to force a redemption. This approach will
+require the issuer to look up all pool trust lines for an asset trust line to
+perform the revoke. It would also add an additional step when regulating assets
+for the issuer, so we would need to add an opt in flag for liquidity pools so
+issuers are aware of this.
This approach would be simpler to implement, and we wouldn't need to tie the
lifetime of an asset trust line to a pool trust line like in this proposal, but
it is not as user-friendly as the current proposal.
### Why the asset trust line is required to exist for the lifetime of corresponding pool trust lines
-This proposal introduces `TrustLineEntryExtensionV2.liquidityPoolUseCount`, which
-keeps track of the number of pool trust lines an asset trust line is used
+
+This proposal introduces `TrustLineEntryExtensionV2.liquidityPoolUseCount`,
+which keeps track of the number of pool trust lines an asset trust line is used
in. The extension is used to make sure the trust line is not deleted while
corresponding pool trust lines exist. This is required because without the
asset trust line, there is no way to deauthorize the trust line and force a
@@ -1309,16 +1373,17 @@ This mechanism is not required for the native asset since a trust line is not
required to hold the native asset, and the native asset is trustless.
### No Interleaved Execution
-This proposal uses `PathPaymentStrictSendOp` and `PathPaymentStrictReceiveOp` as
-opaque interfaces to exchange on the Stellar network. These operations are
-referred to as opaque interfaces because there is no way to specify how you want
-the exchange to execute. This approach is favorable because it requires no
+
+This proposal uses `PathPaymentStrictSendOp` and `PathPaymentStrictReceiveOp`
+as opaque interfaces to exchange on the Stellar network. These operations are
+referred to as opaque interfaces because there is no way to specify how you
+want the exchange to execute. This approach is favorable because it requires no
changes to clients that depend on exchange.
-Because this is an opaque interface, the only thing we should absolutely require
-is that adding liquidity pools as an execution option should never make the
-exchange price worse. This is a weak requirement. Specifically it is much weaker
-than requiring that exchange produces the best price.
+Because this is an opaque interface, the only thing we should absolutely
+require is that adding liquidity pools as an execution option should never make
+the exchange price worse. This is a weak requirement. Specifically it is much
+weaker than requiring that exchange produces the best price.
The primary reason not to require that exchange produces the best price is ease
of implementation. Requiring that exchange always produces the best price
@@ -1330,19 +1395,20 @@ where an exchange will execute against either
- the order book alone (this is exactly what happens in protocol 16)
- one specific liquidity pool alone
-depending on which produces the better price. This price is not guaranteed to be
-the best possible price, but by construction it cannot be worse than executing
-against the order book alone.
+depending on which produces the better price. This price is not guaranteed to
+be the best possible price, but by construction it cannot be worse than
+executing against the order book alone.
-It is important to recognize that this happens on each step in the path, so
-no interleaving is a local statement _not_ a global statement. It is definitely
-possible that a path payment will exchange with a liquidity pool on one step and
-the order book on another step. A particularly surprising case occurs when there
-is a path with an internal loop such as `A -> B -> C -> A -> B`, in which case
-the first `A -> B` may use one venue and the second `A -> B` may use the other
-venue.
+It is important to recognize that this happens on each step in the path, so no
+interleaving is a local statement _not_ a global statement. It is definitely
+possible that a path payment will exchange with a liquidity pool on one step
+and the order book on another step. A particularly surprising case occurs when
+there is a path with an internal loop such as `A -> B -> C -> A -> B`, in which
+case the first `A -> B` may use one venue and the second `A -> B` may use the
+other venue.
#### Residual Arbitrage Opportunities
+
There are a variety of objections to this approach, but none of them justifies
the additional complexity of interleaved execution. It is claimed that
interleaved execution guarantees that there will be no arbitrage opportunities
@@ -1351,21 +1417,22 @@ pools involving the same assets, but false otherwise. If there are linear
combinations of assets which are effectively risk-free, then exchange will
still generate arbitrage opportunities even if interleaved execution is used.
-A concrete example where this could occur is if there were two highly creditable
-issuers of USD. Let's call the assets USD1 and USD2. Suppose a large exchange
-occurs from USD1 to EUR. With interleaved execution, there are no arbitrage
-opportunities between the order book and liquidity pools for USD1/EUR. But it
-could still be possible to sell USD1 for EUR, then buy USD2 for EUR at a profit.
-If there is a USD1/USD2 market then the arbitrageur may be able to settle their
-position instantly. Otherwise, the arbitrageur may wait until the opposite
-arbitrage opportunity arises to unwind their position.
+A concrete example where this could occur is if there were two highly
+creditable issuers of USD. Let's call the assets USD1 and USD2. Suppose a large
+exchange occurs from USD1 to EUR. With interleaved execution, there are no
+arbitrage opportunities between the order book and liquidity pools for
+USD1/EUR. But it could still be possible to sell USD1 for EUR, then buy USD2
+for EUR at a profit. If there is a USD1/USD2 market then the arbitrageur may be
+able to settle their position instantly. Otherwise, the arbitrageur may wait
+until the opposite arbitrage opportunity arises to unwind their position.
-There is also the reality that if the price of a liquidity pool has moved enough
-to generate an arbitrage opportunity with the order book or another liquidity
-pool, then it has probably moved enough to generate an arbitrage opportunity
-with some centralized exchange.
+There is also the reality that if the price of a liquidity pool has moved
+enough to generate an arbitrage opportunity with the order book or another
+liquidity pool, then it has probably moved enough to generate an arbitrage
+opportunity with some centralized exchange.
### CreatePassiveOfferOp, ManageBuyOfferOp, and ManageSellOfferOp Unchanged
+
This proposal does not change the behavior of `CreatePassiveOfferOp`,
`ManageBuyOfferOp`, and `ManageSellOfferOp`. This is a consequence of not
enforcing best pricing and interleaved execution. The Stellar protocol does not
@@ -1378,18 +1445,21 @@ pools, in the sense that they are the only way to change liquidity provided to
those venues.
### ClaimAtom
-In order to enable `PathPaymentStrictSendOp` and `PathPaymentStrictReceiveOp` to
-emit accurate information about exchanges with liquidity pools, we converted
+
+In order to enable `PathPaymentStrictSendOp` and `PathPaymentStrictReceiveOp`
+to emit accurate information about exchanges with liquidity pools, we converted
`ClaimOfferAtom` into a union named `ClaimAtom`. Even though `ClaimAtom` is not
-required for `CreatePassiveOfferOp`, `ManageBuyOfferOp`, and `ManageSellOfferOp`,
-we still make the analogous change to the corresponding operation results. This
-should allow downstream systems to handle both results in the same way.
+required for `CreatePassiveOfferOp`, `ManageBuyOfferOp`, and
+`ManageSellOfferOp`, we still make the analogous change to the corresponding
+operation results. This should allow downstream systems to handle both results
+in the same way.
### No Minimum Deposit Time
+
Other proposals include a minimum time that funds must be deposited in a
-liquidity pool before they can be withdrawn. The argument is that this will help
-ensure stability of liquidity, avoiding fluctuations as volume moves between
-different pairs.
+liquidity pool before they can be withdrawn. The argument is that this will
+help ensure stability of liquidity, avoiding fluctuations as volume moves
+between different pairs.
This proposal does not include a minimum deposit time. The primary argument is
that liquidity fluctuations are the direct manifestation of liquidity providers
@@ -1405,17 +1475,19 @@ that they want to. Modern payment networks, like Stellar, should be trying to
remove friction rather than create it.
### Future Work: Support Fees Other than 0.3%
-This proposal fixes the constant product market maker fee at 0.3%. But we expect
-future protocol versions to take advantage of the extensibility which has been
-built into this proposal to support other fees. Such changes should be
+
+This proposal fixes the constant product market maker fee at 0.3%. But we
+expect future protocol versions to take advantage of the extensibility which
+has been built into this proposal to support other fees. Such changes should be
relatively easy to implement.
## Protocol Upgrade Transition
### Backwards Incompatibilities
+
This proposal introduces one backwards incompatibility. Clients that depend on
-`PathPaymentStrictSendOp` and `PathPaymentStrictReceiveOp` executing against the
-order book will be broken. There are two ways a client could depend on this
+`PathPaymentStrictSendOp` and `PathPaymentStrictReceiveOp` executing against
+the order book will be broken. There are two ways a client could depend on this
- Expecting to receive a certain price
- Expecting to execute certain orders
@@ -1427,16 +1499,20 @@ to this proposal. Therefore, the risk of this backwards incompatibility is
minimal.
### Resource Utilization
+
This proposal should have a minor effect on resource utilization. Converting
`PathPaymentStrictSendOp` and `PathPaymentStrictReceiveOp` into opaque
interfaces for exchange will slightly increase the constant factors associated
with these operations but will not effect the asymptotic complexity.
## Security Concerns
+
This proposal does not introduce any new security concerns.
## Test Cases
+
None yet.
## Implementation
+
None yet.
diff --git a/core/cap-0039.md b/core/cap-0039.md
index 53fa23619..13bbbadb8 100644
--- a/core/cap-0039.md
+++ b/core/cap-0039.md
@@ -16,12 +16,13 @@ Protocol version: TBD
## Simple Summary
This CAP addresses the following authorization semantics requirements:
+
- It should be clear and predictable to an asset holder if their assets are
-revocable.
+ revocable.
- It should be possible for issuer accounts to communicate their intent to
-revoke without giving up the mutability of their asset.
+ revoke without giving up the mutability of their asset.
- It should be possible for issuer accounts to make their assets usable with
-contracts, such as payment channels, without making their accounts immutable.
+ contracts, such as payment channels, without making their accounts immutable.
## Working Group
@@ -32,8 +33,8 @@ consulted individuals mentioned at the top of this document.
Trustline authorization is an important feature of the Stellar protocol. It
allows issuers to handle various regulatory requirements. However, its current
-behavior forces asset issuers to make a choice between immutable and predictable
-for asset holders, or mutable and unpredictable for asset holders.
+behavior forces asset issuers to make a choice between immutable and
+predictable for asset holders, or mutable and unpredictable for asset holders.
The current behavior makes all non-immutable assets revocable even if the asset
issuer does not have auth revocable flag cleared, and even if issuer has no
@@ -42,17 +43,18 @@ other reasons.
Most assets on the Stellar network are not immutable.
-This prevents most assets from being used in contracts since the revocation of a
-trustline can break a contract. This may prevent most assets from being used in payment channels, such as those described in [CAP-21].
+This prevents most assets from being used in contracts since the revocation of
+a trustline can break a contract. This may prevent most assets from being used
+in payment channels, such as those described in [CAP-21].
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- The Stellar Network should make it clear for asset holders to understand the
-trust relationship they have with asset issuers.
+ trust relationship they have with asset issuers.
- The Stellar Network should enable predictable lock-up of funds in escrow
-accounts for contracts, such as payment channels.
+ accounts for contracts, such as payment channels.
## Specification
@@ -75,12 +77,12 @@ index 0e7bc842..68c52758 100644
+ // Trustlines are created with revocation disabled set to "true"
+ AUTH_NOT_REVOCABLE_FLAG = 0x10
};
-
+
// mask for all valid flags
const MASK_ACCOUNT_FLAGS = 0x7;
const MASK_ACCOUNT_FLAGS_V17 = 0xF;
+const MASK_ACCOUNT_FLAGS_V18 = 0x1F;
-
+
// maximum number of signers
const MAX_SIGNERS = 20;
@@ -206,13 +209,16 @@ enum TrustLineFlags
@@ -92,13 +94,13 @@ index 0e7bc842..68c52758 100644
+ // issuer has specified that it may not revoke authorization.
+ TRUSTLINE_NOT_REVOCABLE_FLAG = 8,
};
-
+
// mask for all trustline flags
const MASK_TRUSTLINE_FLAGS = 1;
const MASK_TRUSTLINE_FLAGS_V13 = 3;
const MASK_TRUSTLINE_FLAGS_V17 = 7;
+const MASK_TRUSTLINE_FLAGS_V18 = 15;
-
+
struct TrustLineEntry
{
@@ -106,78 +108,89 @@ index 0e7bc842..68c52758 100644
### Semantics
-This proposal introduces one new account flag that controls whether new trustlines are created with revocation disabled or not.
+This proposal introduces one new account flag that controls whether new
+trustlines are created with revocation disabled or not.
-This proposal introduces one new trustline flag that captures onto the trustline
-at the moment it is created whether it will be revocable by the issuer account.
+This proposal introduces one new trustline flag that captures onto the
+trustline at the moment it is created whether it will be revocable by the
+issuer account.
-This proposal changes the `AllowTrustOp` to disallow its use to revoke or reduce
-the limit of a trustline that has the new trustline flag set. This prevents
-authorization revocation on trustlines when the issuer has indicated it does not
-intend to revoke trustlines while allowing the issuer account to remain mutable.
+This proposal changes the `AllowTrustOp` to disallow its use to revoke or
+reduce the limit of a trustline that has the new trustline flag set. This
+prevents authorization revocation on trustlines when the issuer has indicated
+it does not intend to revoke trustlines while allowing the issuer account to
+remain mutable.
-Existing and new trustlines for issuers that do not use the new account flag are
-unaffected and will use the existing behavior they have today. An issuer may
-revoke existing trustline at anytime by enabling the `AUTH_REVOCABLE_FLAG`
+Existing and new trustlines for issuers that do not use the new account flag
+are unaffected and will use the existing behavior they have today. An issuer
+may revoke existing trustline at anytime by enabling the `AUTH_REVOCABLE_FLAG`
account flag on the issuer account, and using the `AllowTrustOp` operation to
revoke authorization of the trustor.
-New trustlines created when the issuer account has its `AUTH_NOT_REVOCABLE_FLAG`
-will not be revocable, even if the issuer account sets the `AUTH_REVOCABLE_FLAG`
-account flag at a later time.
+New trustlines created when the issuer account has its
+`AUTH_NOT_REVOCABLE_FLAG` will not be revocable, even if the issuer account
+sets the `AUTH_REVOCABLE_FLAG` account flag at a later time.
#### Account Flags
This proposal introduces a new account flag:
-- `AUTH_NOT_REVOCABLE_FLAG` that indicates if trustlines should be created
-not revocable.
+
+- `AUTH_NOT_REVOCABLE_FLAG` that indicates if trustlines should be created not
+ revocable.
#### TrustLine Flags
-This proposal introduces a new trustline flag that is set on the trustline
-when it is created:
+This proposal introduces a new trustline flag that is set on the trustline when
+it is created:
+
- `TRUSTLINE_NOT_REVOCABLE_FLAG` that is set if the issuer account has its
-`AUTH_NOT_REVOCABLE_FLAG` flag set.
+ `AUTH_NOT_REVOCABLE_FLAG` flag set.
#### Change Trust Operation
This proposal introduces changes to the semantics of the `ChangeTrustOp`
operation.
-When `ChangeTrustOp` creates a new trustline it sets
+When `ChangeTrustOp` creates a new trustline it sets
`TRUSTLINE_NOT_REVOCABLE_FLAG` if the `AUTH_NOT_REVOCABLE_FLAG` account flag is
set on the issuer account.
-When `ChangeTrustOp` modifies an existing trustline the new flag is not changed,
-regardless of the state of the `AUTH_NOT_REVOCABLE_FLAG` or
+When `ChangeTrustOp` modifies an existing trustline the new flag is not
+changed, regardless of the state of the `AUTH_NOT_REVOCABLE_FLAG` or
`AUTH_REVOCABLE_FLAG` account flags on the issuer account.
#### Allow Trust Operation
-This proposal introduces changes to the semantics of the `ALLOW_TRUST` operation.
+This proposal introduces changes to the semantics of the `ALLOW_TRUST`
+operation.
- Disallow `ALLOW_TRUST` operations that downgrade authorization when the
-trustline is authorized and the `TRUSTLINE_NOT_REVOCABLE_FLAG` trustline flag is
-set.
+ trustline is authorized and the `TRUSTLINE_NOT_REVOCABLE_FLAG` trustline flag
+ is set.
#### Set TrustLine Flags Operation
-This proposal extends the cases that the `SetTrustLineFlagsOp` operation will return `SET_TRUST_LINE_FLAGS_MALFORMED` during validation to include the following conditions:
+This proposal extends the cases that the `SetTrustLineFlagsOp` operation will
+return `SET_TRUST_LINE_FLAGS_MALFORMED` during validation to include the
+following conditions:
+
- `TRUSTLINE_NOT_REVOCABLE_FLAG` is set on `clearFlags`.
Issuer accounts could use `SetTrustLineFlagsOp` to set the
-`TRUSTLINE_NOT_REVOCABLE_FLAG` on accounts, making it such
-that existing trustlines cannot be revoked.
+`TRUSTLINE_NOT_REVOCABLE_FLAG` on accounts, making it such that existing
+trustlines cannot be revoked.
## Design Rationale
-The `ChangeTrustOp` semantics introduced are consistent with the semantics of the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` flag that was introduced in [CAP-35].
+The `ChangeTrustOp` semantics introduced are consistent with the semantics of
+the `TRUSTLINE_CLAWBACK_ENABLED_FLAG` flag that was introduced in [CAP-35].
A disabled flag is introduced because disabling revocation is the new behavior.
Existing trustlines of non-immutable issuers are already revocable enabled even
though no flag on the trustline indicates that.
-A new not revocable flag is introduced onto accounts so that existing issuer accounts see no change in behavior.
+A new not revocable flag is introduced onto accounts so that existing issuer
+accounts see no change in behavior.
## Protocol Upgrade Transition
@@ -185,8 +198,8 @@ A new not revocable flag is introduced onto accounts so that existing issuer acc
This proposal is backwards compatible with existing and new trustlines.
-This proposal is backwards compatible for existing and new issuers who expect to
-enable auth revocation in the future but do not wish to enable it today.
+This proposal is backwards compatible for existing and new issuers who expect
+to enable auth revocation in the future but do not wish to enable it today.
### Resource Utilization
diff --git a/core/cap-0040.md b/core/cap-0040.md
index 3203038e9..ef8c71854 100644
--- a/core/cap-0040.md
+++ b/core/cap-0040.md
@@ -36,8 +36,8 @@ agreed by all parties to the contract.
Signing a set of transactions in contracts that have a time delay as specified
in [CAP-21] requires participants to exchange signatures for each transaction
-across multiple-steps, exchanging the signatures for the transactions in reverse
-order that they can be executed.
+across multiple-steps, exchanging the signatures for the transactions in
+reverse order that they can be executed.
A party must not share their signature for an earlier transaction before
receiving all signatures for latter transactions otherwise another party may be
@@ -46,8 +46,8 @@ authorize a latter transaction.
In the payment channel designs identified in the design rationale of [CAP-21]
this requires participants to exchange signatures across at least three
-messages. This coordination increases implementation complexity. This complexity
-increases significantly for payment channel implementations that are
+messages. This coordination increases implementation complexity. This
+complexity increases significantly for payment channel implementations that are
asynchronous or payment channels where receiving participants are allowed to
fall behind confirming payments from a sending participant.
@@ -60,7 +60,7 @@ transactions could be authorized simplifies payment channel implementations.
This CAP is aligned with the following Stellar Network Goals:
- The Stellar Network should make it easy for developers of Stellar projects to
-create highly usable products
+ create highly usable products
## Specification
@@ -90,7 +90,7 @@ index 8f7d5c20..03149f3d 100644
+ SIGNER_KEY_TYPE_HASH_X = KEY_TYPE_HASH_X,
+ SIGNER_KEY_TYPE_ED25519_SIGNED_PAYLOAD = KEY_TYPE_ED25519_SIGNED_PAYLOAD
};
-
+
union PublicKey switch (PublicKeyType type)
@@ -52,6 +54,13 @@ case SIGNER_KEY_TYPE_PRE_AUTH_TX:
case SIGNER_KEY_TYPE_HASH_X:
@@ -104,7 +104,7 @@ index 8f7d5c20..03149f3d 100644
+ opaque payload<64>;
+ } ed25519SignedPayload;
};
-
+
// variable size as the size depends on the signature scheme used
```
@@ -127,12 +127,14 @@ of the signer's payload using the private key that derives the signer's ed25519
public key.
For example, given:
+
- A private key `Ks` and its derived public key `Kp`.
- A ed25519 signed payload signer `S` that contains:
- Payload `P`.
- Public key `Kp`.
A signature that satisfies signer `S` is produced by:
+
- Ed25519 signing `P` with `Ks`.
Unlike transaction signatures in the Stellar protocol, the payload of this
@@ -149,8 +151,8 @@ payload such that it has a length of 4 bytes, for calculating the hint.
#### Transaction Envelopes
This proposal makes no structural changes to transaction envelopes other than
-the signature of an ed25519 signed payload signer may be included in the list of
-decorated signatures.
+the signature of an ed25519 signed payload signer may be included in the list
+of decorated signatures.
#### Signature Checking
@@ -158,8 +160,8 @@ Signature checking is changed to include verifying that any ed25519 signed
payload signer's have matching signatures.
Ed25519 signed payload signer signatures are verified by performing ed25519
-signature verification using the signature, the payload from the signer, and the
-ed25519 public key from the signer.
+signature verification using the signature, the payload from the signer, and
+the ed25519 public key from the signer.
#### Signer Verification Order
@@ -175,8 +177,8 @@ fails with `txBAD_AUTH_EXTRA`.
#### Transaction Validity
If a payload signer is used in the `extraSigners` precondition specified in
-[CAP-21], and the payload is empty, the transaction will fail with `txMALFORMED`
-(also specified in [CAP-21]).
+[CAP-21], and the payload is empty, the transaction will fail with
+`txMALFORMED` (also specified in [CAP-21]).
#### SetOptionsOp
@@ -186,16 +188,16 @@ is empty.
## Design Rationale
-This proposal provides a primitive that makes it possible to construct a set
-of transactions where all transactions can be authorized and submitted if the
+This proposal provides a primitive that makes it possible to construct a set of
+transactions where all transactions can be authorized and submitted if the
first transaction is authorized and submitted.
This proposal makes this possible since a Stellar transaction hash can be
specified as the payload of an ed25519 signed payload signer. A Stellar
transaction hash is the SHA-256 hash of the `TransactionSignaturePayload`
containing the Stellar transaction. A valid signature for the signer will also
-be a valid signature of the other Stellar transaction if the ed25519 key used to
-sign the payload is also required to sign the other transaction.
+be a valid signature of the other Stellar transaction if the ed25519 key used
+to sign the payload is also required to sign the other transaction.
An ed25519 signed payload signer can be constructed for each subsequent
transaction that needs authorizing, and included in the `extraSigners`
@@ -207,9 +209,9 @@ existing protocol by using the preauth transaction signer, use of that signer
requires creating a new ledger entry which is impractical in contracts with
multiple iterations.
-The maximum size of the set of transactions is three, limited to being one
-more than the maximum number of `extraSigners` specified in [CAP-21], currently
-two, and can be increased past three if the maximum size of `extraSigners` is
+The maximum size of the set of transactions is three, limited to being one more
+than the maximum number of `extraSigners` specified in [CAP-21], currently two,
+and can be increased past three if the maximum size of `extraSigners` is
changed.
The hint is specified as an XOR of the last 4 bytes of all elements of the
@@ -229,22 +231,22 @@ the requirements on when participants may exchange signatures for transactions.
The following paragraph in CAP-21:
->To update the payment channel state, the parties 1) increment i, 2)
->sign and exchange a closing transaction C_i, and finally 3) sign and
->exchange a declaration transaction D_i.
+> To update the payment channel state, the parties 1) increment i, 2) sign and
+> exchange a closing transaction C_i, and finally 3) sign and exchange a
+> declaration transaction D_i.
May be changed to:
->To update the payment channel state, the parties 1) increment i, 2)
->sign and exchange a closing transaction C_i and declaration
->transaction D_i.
+> To update the payment channel state, the parties 1) increment i, 2) sign and
+> exchange a closing transaction C_i and declaration transaction D_i.
-This changes the number of messages the parties must send to each other to
-update the payment channel, reducing the number from three to two. It reduces
-the number of messages down to one for any the payment channel implementation
+This changes the number of messages the parties must send to each other to
+update the payment channel, reducing the number from three to two. It reduces
+the number of messages down to one for any the payment channel implementation
where the receiving party of a payment update is the beneficiary.
Three messages using CAP-21:
+
```
+---+ +---+
| A | | B |
@@ -262,6 +264,7 @@ Three messages using CAP-21:
```
Simplified two message process using CAP-21 and CAP-40:
+
```
+---+ +---+
| A | | B |
@@ -275,8 +278,9 @@ Simplified two message process using CAP-21 and CAP-40:
| |
```
-Simplified one message process using CAP-21 and CAP-40 where B is the only
+Simplified one message process using CAP-21 and CAP-40 where B is the only
beneficiary of the update, supporting queue-like processing of payments:
+
```
+---+ +---+
| A | | B |
@@ -288,40 +292,40 @@ beneficiary of the update, supporting queue-like processing of payments:
```
This simplified exchange would be implemented by requiring the sender of a
-payment to include in D_i the `extraSigners` precondition with an ed25519 signed
-payload signer where the payload is the transaction hash of C_i and the ed25519
-public key is the public key of the receiving participant that must sign D_i and
-C_i.
-
-The effect of this change is the receiving participant must sign the transaction
-hash of C_i to authorize D_i, embedding their signature for C_i into D_i. If the
-receiving participant nefariously authorizes and submits D_i without sharing a
-signature for C_i, the sender who is watching the network may see D_i be
-submitted, extract the signature for C_i from the signatures on D_i, attach the
-signature to C_i, and submit C_i.
+payment to include in D_i the `extraSigners` precondition with an ed25519
+signed payload signer where the payload is the transaction hash of C_i and the
+ed25519 public key is the public key of the receiving participant that must
+sign D_i and C_i.
+
+The effect of this change is the receiving participant must sign the
+transaction hash of C_i to authorize D_i, embedding their signature for C_i
+into D_i. If the receiving participant nefariously authorizes and submits D_i
+without sharing a signature for C_i, the sender who is watching the network may
+see D_i be submitted, extract the signature for C_i from the signatures on D_i,
+attach the signature to C_i, and submit C_i.
The use of the payload signer in CAP-21's payment channel requires a payload of
-32 bytes to store the hash of another Stellar transaction. 32 bytes are required
-because Stellar transaction hashes utilize the SHA-256 algorithm.
+32 bytes to store the hash of another Stellar transaction. 32 bytes are
+required because Stellar transaction hashes utilize the SHA-256 algorithm.
### Other Uses
This new signer is likely to have other applications in scenarios similar to
-where HASH_X signers are currently used, except that the data revealed would not
-only be a hash shared by multiple transactions across multiple chains, but could
-be a signature for a hash validated by a smart contract on another chain, or a
-signature for any other use. However, this has limited use to only signatures of
-ed25519 keys as specified.
+where HASH_X signers are currently used, except that the data revealed would
+not only be a hash shared by multiple transactions across multiple chains, but
+could be a signature for a hash validated by a smart contract on another chain,
+or a signature for any other use. However, this has limited use to only
+signatures of ed25519 keys as specified.
The use of the payload signer in these other applications could require a
variety of different payload sizes. It would be impractical to permit large
payloads without changes to Stellar's fee participation because the sizes of
transactions and account sub-entries in the ledger could explode.
-A payload with a maximum length of 64 bytes, rather than 32 bytes, would support
-payloads that are hashes for most hashing algorithms defined today – e.g.
-SHA-384, SHA-512 – which may support future cross-chain applications that choose
-to use ed25519 signatures of hashes.
+A payload with a maximum length of 64 bytes, rather than 32 bytes, would
+support payloads that are hashes for most hashing algorithms defined today –
+e.g. SHA-384, SHA-512 – which may support future cross-chain applications that
+choose to use ed25519 signatures of hashes.
## Protocol Upgrade Transition
@@ -338,8 +342,8 @@ The size of signatures, and therefore transactions, remain the same.
The effort to verify the signature is equivalent to the effort to verify an
ed25519 signature.
-The size of signers stored in the ledger will be 3x the size, at 96 bytes,
-for ed25519 signed payload signers compared to 32 bytes for all other signers.
+The size of signers stored in the ledger will be 3x the size, at 96 bytes, for
+ed25519 signed payload signers compared to 32 bytes for all other signers.
## Test Cases
@@ -350,15 +354,15 @@ None yet.
If a transaction requires the signature of a Signed Payload Signer and a signer
signs it without understanding the preimage of the payload, the signer could be
authorizing another Stellar transaction if the payload is a Stellar transaciton
-hash, or the signer could be authorizing some other operation on another network
-or application. People and applications should never sign a hash that they do
-not completely understand the consequences of signing, in all the contexts that
-their key provides authentication or authorization. People and applications
-should never sign a transaction they do not completely understand. There are
-many cases where signing a transaction not completely understood can pose a
-security concern for the signer because the signer could be authorizing
-something they would not approve of if they did understand the transaction. This
-proposal introduces a new case with the Signed Payload Signer.
+hash, or the signer could be authorizing some other operation on another
+network or application. People and applications should never sign a hash that
+they do not completely understand the consequences of signing, in all the
+contexts that their key provides authentication or authorization. People and
+applications should never sign a transaction they do not completely understand.
+There are many cases where signing a transaction not completely understood can
+pose a security concern for the signer because the signer could be authorizing
+something they would not approve of if they did understand the transaction.
+This proposal introduces a new case with the Signed Payload Signer.
## Implementation
diff --git a/core/cap-0041.md b/core/cap-0041.md
index 1f831d1df..467491d53 100644
--- a/core/cap-0041.md
+++ b/core/cap-0041.md
@@ -18,10 +18,11 @@ Protocol version: TBD
## Simple Summary
This proposal provides users with the capability to submit transactions to the
-Stellar network concurrently, without coordinating the sequence numbers of those
-transactions. This capability is limited to transactions that are valid for two
-ledgers only, intended for use in the most common payments and transacting use
-case where users are building, signing, and submitting transactions immediately.
+Stellar network concurrently, without coordinating the sequence numbers of
+those transactions. This capability is limited to transactions that are valid
+for two ledgers only, intended for use in the most common payments and
+transacting use case where users are building, signing, and submitting
+transactions immediately.
## Working Group
@@ -32,26 +33,28 @@ TBD
Users of the Stellar network must coordinate and navigate the sequence number
for their account when submitting more than one transaction at a time.
-Typically this involves throttling payments to be processed serially and risking
-one transaction in the chain being invalid preventing a subsequent transaction
-from executing.
+Typically this involves throttling payments to be processed serially and
+risking one transaction in the chain being invalid preventing a subsequent
+transaction from executing.
Users who need to transact concurrently, or who do not wish to risk failed
subsequent transactions, can create a pool of Stellar accounts that exist only
-to be the source accounts of transactions to provide sequence numbers. Users
+to be the source accounts of transactions to provide sequence numbers. Users
must create the pool of accounts, maintain their balances to cover transaction
fees, and operate a database or infrastructure supporting synchronized locking
of the accounts. An account is locked when selected for use with a transaction.
-An account is unlocked after the transaction is seen as confirmed in a ledger as
-successful or failed, or its time bounds have been exceeded by a closed ledger.
+An account is unlocked after the transaction is seen as confirmed in a ledger
+as successful or failed, or its time bounds have been exceeded by a closed
+ledger.
-These problems are very similar to the problems faced by users of credit network
-acquiring payment systems that do not allow concurrent payments on a single
-virtual terminal. This problem is one of the problems often abstracted from
-merchants of credit networks by payment gateways and payment service providers.
+These problems are very similar to the problems faced by users of credit
+network acquiring payment systems that do not allow concurrent payments on a
+single virtual terminal. This problem is one of the problems often abstracted
+from merchants of credit networks by payment gateways and payment service
+providers.
Modern software products perform tasks concurrently and integrate with APIs and
-external systems without coordinating or synchronizing on a resource. These
+external systems without coordinating or synchronizing on a resource. These
problems increase the complexity of integrating with the Stellar network, and
create an experience more alike to acquiring payment systems and not modern
concurrent systems.
@@ -61,19 +64,19 @@ concurrent systems.
This CAP is aligned with the following Stellar Network Goals:
- The Stellar Network should make it easy for developers of Stellar projects to
-create highly usable products.
+ create highly usable products.
-- The Stellar Network should run at scale and at low cost to all participants of
-the network.
+- The Stellar Network should run at scale and at low cost to all participants
+ of the network.
-- The Stellar Network should enable cross-border payments, i.e. payments via
-exchange of assets, throughout the globe, enabling users to make payments
-between assets in a manner that is fast, cheap, and highly usable.
+- The Stellar Network should enable cross-border payments, i.e. payments via
+ exchange of assets, throughout the globe, enabling users to make payments
+ between assets in a manner that is fast, cheap, and highly usable.
## Abstract
-This proposal allows a transaction to be valid if its `seqNum` is zero (`0`), if
-the transaction is valid for only two ledgers.
+This proposal allows a transaction to be valid if its `seqNum` is zero (`0`),
+if the transaction is valid for only two ledgers.
This proposal is dependent on the `ledgerBounds` transaction precondition
proposed in [CAP-21].
@@ -88,7 +91,7 @@ index 1a4e491a..4574a038 100644
--- a/src/xdr/Stellar-transaction.x
+++ b/src/xdr/Stellar-transaction.x
@@ -1508,7 +1508,10 @@ enum TransactionResultCode
-
+
txNOT_SUPPORTED = -12, // transaction type not supported
txFEE_BUMP_INNER_FAILED = -13, // fee bump inner transaction failed
- txBAD_SPONSORSHIP = -14 // sponsorship not confirmed
@@ -97,7 +100,7 @@ index 1a4e491a..4574a038 100644
+ txMISSING_MEMO = -15, // transaction is missing a memo and required without sequence number
+ txDUPLICATE = -16 // transaction has been included in a prior ledger
};
-
+
// InnerTransactionResult must be binary compatible with TransactionResult
```
@@ -106,14 +109,15 @@ index 1a4e491a..4574a038 100644
This proposal changes the values that are valid for the `seqNum` of a
`TransactionV1Envelope` to not only the next sequence number of its
-`sourceAccount`, but also zero (`0`), if its `ledgerBounds` is set to a non-zero
-value and limits the transaction to being valid only for two or less ledgers,
-and if it has a memo set.
+`sourceAccount`, but also zero (`0`), if its `ledgerBounds` is set to a
+non-zero value and limits the transaction to being valid only for two or less
+ledgers, and if it has a memo set.
A transaction submitted will be valid only if had a memo set and, for a next
ledger `n`:
+
- `ledgerBounds` `minLedger` set to `n-1`, `maxLedger` is set to `n`, and its
-hash was not included in `n-1`'s transaction set.
+ hash was not included in `n-1`'s transaction set.
- `ledgerBounds` `minLedger` set to `n`, and `maxLedger` set to `n` or `n+1`.
A transaction submitted with a `seqNum` of zero but whose memo is not set, is
@@ -125,22 +129,23 @@ A transaction submitted with a `seqNum` of zero that does not satisfy the
if its `minLedger` is greater than the next ledger.
A transaction submitted with a `seqNum` of zero satisfies the `ledgerBounds`
-requirements, but whose hash is included in the last ledgers transaction set, is
-rejected with `TransactionResultCode` `txDUPLICATE`.
+requirements, but whose hash is included in the last ledgers transaction set,
+is rejected with `TransactionResultCode` `txDUPLICATE`.
This proposal introduces a new `TransactionResultCode` `txDUPLICATE` that is
used whenever a transaction is submitted a subsequent time after it has been
-included in a past ledger, and no other condition makes the transaction invalid.
+included in a past ledger, and no other condition makes the transaction
+invalid.
This proposal introduces a new `TransactionResultCode` `txMISSING_MEMO` that is
-used whenever a transaction is submitted a `seqNum` zero without a memo.
+used whenever a transaction is submitted a `seqNum` zero without a memo.
## Design Rationale
-Sequence numbers allow the protocol to guarantee that no replay of a transaction
-is ever possible for an account, forever, without a validator needing to
-remember all transactions that have been included in past ledgers. This reduces
-the storage and lookup costs for a validator.
+Sequence numbers allow the protocol to guarantee that no replay of a
+transaction is ever possible for an account, forever, without a validator
+needing to remember all transactions that have been included in past ledgers.
+This reduces the storage and lookup costs for a validator.
However, for the majority of transactions on the Stellar network sequence
numbers do not need to provide this guarantee forever. The majority of users of
@@ -153,12 +158,12 @@ behavior by application developers that they do not signal a need for most
transactions to be valid for more than a single ledger.
These qualities of the majority of use cases submitting transactions to the
-network indicate that the network does not need to prevent replay using sequence
-numbers forever.
+network indicate that the network does not need to prevent replay using
+sequence numbers forever.
-Validators can efficiently check that a transaction has not occurred in the last
-ledger with limited storage or memory requirements since the data set is limited
-to the transactions in a single ledger.
+Validators can efficiently check that a transaction has not occurred in the
+last ledger with limited storage or memory requirements since the data set is
+limited to the transactions in a single ledger.
### Sequence Number Zero
@@ -192,11 +197,11 @@ that happen to contain the same logical operations.
### Transaction Result Code Duplicate
The `TransactionResultCode` `txDUPLICATE` is introduced because other result
-codes semantics do not fit the case where the `ledgerBounds` are valid, there is
-no sequence number, but a transaction is invalid due to replay. When a duplicate
-transaction is submitted with the protocol today it will likely receive a
-`txBAD_SEQ` result code, however in this case the sequence number is zero or not
-set.
+codes semantics do not fit the case where the `ledgerBounds` are valid, there
+is no sequence number, but a transaction is invalid due to replay. When a
+duplicate transaction is submitted with the protocol today it will likely
+receive a `txBAD_SEQ` result code, however in this case the sequence number is
+zero or not set.
This result code will not be seen by most users because Horizon's transaction
submission system provides idempotency, identifying duplicate submissions and
@@ -206,8 +211,9 @@ returning the previous result.
### Backwards Incompatibilities
-This proposal is completely backwards compatible as it defines new functionality
-that is only accessible with transactions that are currently invalid.
+This proposal is completely backwards compatible as it defines new
+functionality that is only accessible with transactions that are currently
+invalid.
### Resource Utilization
@@ -222,9 +228,9 @@ operations. Therefore, the data set will be at most 1000 transactions, and will
consume at least 32KB if stored in memory, assuming transaction hashes are 32
bytes.
-This proposal requires validators to hold a list of all transactions hashes from
-the last ledger. Validators typically already store a list of the transactions
-from a number of recent ledgers and so no new storage is expected.
+This proposal requires validators to hold a list of all transactions hashes
+from the last ledger. Validators typically already store a list of the
+transactions from a number of recent ledgers and so no new storage is expected.
## Test Cases
diff --git a/core/cap-0042.md b/core/cap-0042.md
index 824f23f17..0232bbf2a 100644
--- a/core/cap-0042.md
+++ b/core/cap-0042.md
@@ -14,7 +14,8 @@ Protocol version: 20
## Simple Summary
-This CAP gives validators control of which fee policy to use for individual transactions included in a ledger.
+This CAP gives validators control of which fee policy to use for individual
+transactions included in a ledger.
## Working Group
@@ -22,36 +23,51 @@ TBD
## Motivation
-Right now, there is no mechanism that makes it possible to implement different fee structures based on transaction set composition; as a consequence, transaction sets can sometimes be heavily biased towards undesirable behaviors such as spikes of arbitrage trades that eventually fail.
+Right now, there is no mechanism that makes it possible to implement different
+fee structures based on transaction set composition; as a consequence,
+transaction sets can sometimes be heavily biased towards undesirable behaviors
+such as spikes of arbitrage trades that eventually fail.
-This CAP aims to correct this by isolating subsets of transactions that can compete on fees without causing overall fees to increase. At the same time, it makes it possible to expand policies to other dimensions in the future.
+This CAP aims to correct this by isolating subsets of transactions that can
+compete on fees without causing overall fees to increase. At the same time, it
+makes it possible to expand policies to other dimensions in the future.
### Goals Alignment
-The Stellar network aims to give equitable access to the global financial infrastructure, in particular the network should not benefit a minority of participants at the expense of others.
+The Stellar network aims to give equitable access to the global financial
+infrastructure, in particular the network should not benefit a minority of
+participants at the expense of others.
This CAP aims at restoring balance between use cases on the network.
## Abstract
-Right now transaction fees are computed for a given ledger based on [CAP-0005 Throttling and transaction pricing improvements](CAP-0005.md).
+Right now transaction fees are computed for a given ledger based on
+[CAP-0005 Throttling and transaction pricing improvements](CAP-0005.md).
CAP-005 defines:
-* How a transaction set (to be applied for the next ledger) gets constructed from an arbitrary number of candidate transactions as to enforce the policy that puts a bound on the number of operations present in any given ledger.
-* The base fee to use when applying a transaction set
+
+- How a transaction set (to be applied for the next ledger) gets constructed
+ from an arbitrary number of candidate transactions as to enforce the policy
+ that puts a bound on the number of operations present in any given ledger.
+- The base fee to use when applying a transaction set
This CAP generalizes the above in the following way:
-* It allows to subdivide transaction sets into components.
-* Components are sets of transactions with their own fee policy.
-* Validators get to select how to decompose and assign policies.
-Note that this CAP does not modify how transactions gets applied (ie: the apply order) other than fee computation.
+- It allows to subdivide transaction sets into components.
+- Components are sets of transactions with their own fee policy.
+- Validators get to select how to decompose and assign policies.
+
+Note that this CAP does not modify how transactions gets applied (ie: the apply
+order) other than fee computation.
## Specification
### XDR Changes
-This patch of XDR changes is based on the XDR files in commit (`b9501ae3288a5879d457841168cb4b249691cb43`) of stellar-core.
+This patch of XDR changes is based on the XDR files in commit
+(`b9501ae3288a5879d457841168cb4b249691cb43`) of stellar-core.
+
```diff mddiffcheck.base=b9501ae3288a5879d457841168cb4b249691cb43
---
src/protocol-curr/xdr/Stellar-ledger.x | 60 +++++++++++++++++++++++++-
@@ -64,7 +80,7 @@ index 84b84cbf..9c2cbcee 100644
@@ -176,6 +176,29 @@ case METAENTRY:
BucketMetadata metaEntry;
};
-
+
+enum TxSetComponentType
+{
+ // txs with effective fee <= bid derived from a base fee (if any).
@@ -94,7 +110,7 @@ index 84b84cbf..9c2cbcee 100644
@@ -184,6 +207,19 @@ struct TransactionSet
TransactionEnvelope txs<>;
};
-
+
+struct TransactionSetV1
+{
+ Hash previousLedgerHash;
@@ -114,7 +130,7 @@ index 84b84cbf..9c2cbcee 100644
@@ -203,11 +239,13 @@ struct TransactionHistoryEntry
uint32 ledgerSeq;
TransactionSet txSet;
-
+
- // reserved for future use
+ // when v != 0, txSet must be empty
union switch (int v)
@@ -129,7 +145,7 @@ index 84b84cbf..9c2cbcee 100644
@@ -358,9 +396,29 @@ struct LedgerCloseMetaV0
SCPHistoryEntry scpInfo<>;
};
-
+
+struct LedgerCloseMetaV1
+{
+ LedgerHeaderHistoryEntry ledgerHeader;
@@ -162,140 +178,229 @@ index 84b84cbf..9c2cbcee 100644
#### Consensus value
-SCP messages and ledger header reference transaction sets by hash. Consequently, the preimage supplied can transparently be migrated to `GeneralizedTransactionSet` when voting for a transaction set that can be applied using the version of the protocol with this CAP.
-
-The only place where it is not possible to transparently migrate to `GeneralizedTransactionSet` is in the `TransactionHistoryEntry` that are used in history archives where the `txSet` field is being deprecated instead.
-This allows to publish the actual hash preimage used during consensus rounds.
-
-In summary: if `lcl.ledgerHeader.version` is less than `P` (version this CAP takes effect), the preimage must be a `TransactionSet`, otherwise it must be a `GeneralizedTransactionSet`.
+SCP messages and ledger header reference transaction sets by hash.
+Consequently, the preimage supplied can transparently be migrated to
+`GeneralizedTransactionSet` when voting for a transaction set that can be
+applied using the version of the protocol with this CAP.
+The only place where it is not possible to transparently migrate to
+`GeneralizedTransactionSet` is in the `TransactionHistoryEntry` that are used
+in history archives where the `txSet` field is being deprecated instead. This
+allows to publish the actual hash preimage used during consensus rounds.
+In summary: if `lcl.ledgerHeader.version` is less than `P` (version this CAP
+takes effect), the preimage must be a `TransactionSet`, otherwise it must be a
+`GeneralizedTransactionSet`.
#### Value Validation
A `v1TxSet` is validated using the following rules:
-* `previousLedgerHash` is equal to the hash of the previous ledger.
-* `phases` contains exactly one element (this is an extension point).
-* `v0Components`
- * contains at least one transaction (hence empty `v1TxSet` can *only* be represented by a single phase with no components)
- * transactions from different components do not overlap
- * the union of all transactions together forms a valid transaction set that can be applied to a legder (transactions are valid, accounts can pay for fees, transactions sequence numbers form valid source account chains).
- * fee bids for any transaction satisfy the "minimum fee requirement", i.e. its fee bid is greater or equal to the minimum fee derived from `ledgerHeader.baseFee`
- * all the `baseFee` values in different `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` components are unique
- * `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` with non-empty `baseFee`:
- * `baseFee >= ledgerHeader.baseFee` (a corollary from the global minimum fee bid requirement).
- * each transaction in this component has a fee bid greater than or equal to the minimum fee bid derived from `baseFee` (for a regular transaction that would translate to `baseFee * numberOfTransactionOperations <= bidFee`)
+
+- `previousLedgerHash` is equal to the hash of the previous ledger.
+- `phases` contains exactly one element (this is an extension point).
+- `v0Components`
+ - contains at least one transaction (hence empty `v1TxSet` can _only_ be
+ represented by a single phase with no components)
+ - transactions from different components do not overlap
+ - the union of all transactions together forms a valid transaction set that
+ can be applied to a legder (transactions are valid, accounts can pay for
+ fees, transactions sequence numbers form valid source account chains).
+ - fee bids for any transaction satisfy the "minimum fee requirement", i.e.
+ its fee bid is greater or equal to the minimum fee derived from
+ `ledgerHeader.baseFee`
+ - all the `baseFee` values in different `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE`
+ components are unique
+ - `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` with non-empty `baseFee`:
+ - `baseFee >= ledgerHeader.baseFee` (a corollary from the global minimum
+ fee bid requirement).
+ - each transaction in this component has a fee bid greater than or equal to
+ the minimum fee bid derived from `baseFee` (for a regular transaction
+ that would translate to
+ `baseFee * numberOfTransactionOperations <= bidFee`)
#### Effective fee computation
The effective fee for a given transaction is computed in the following way:
- * If the transaction is in a `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` component and `baseFee` is empty, its effective fee is its fee bid.
- * If the transaction is in a `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` component and `baseFee` is set, its effective fee is derived from a base fee equal to that component's `baseFee`.
+- If the transaction is in a `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` component
+ and `baseFee` is empty, its effective fee is its fee bid.
+- If the transaction is in a `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` component
+ and `baseFee` is set, its effective fee is derived from a base fee equal to
+ that component's `baseFee`.
#### Candidate value generation
-Just like today, this CAP does not specify the exact algorithm used to produce a valid value as to make it easier for implementations to compete on the quality of transaction sets.
+Just like today, this CAP does not specify the exact algorithm used to produce
+a valid value as to make it easier for implementations to compete on the
+quality of transaction sets.
Here are a few example strategies that could be employed:
-* Do not give any fee discount to transactions containing "high traffic operations" (validators may keep a tally of recent operations to decide when this makes sense).
-* Limit the number of operations in a ledger that interact with the DEX (ie: any operation that could cross an offer).
-* Reserve some ledger capacity for transactions that are simple payment.
-* Define a few "high priority lanes" that can be easily identified, and one "catch all" lane limited in capacity for unknown transactions.
+
+- Do not give any fee discount to transactions containing "high traffic
+ operations" (validators may keep a tally of recent operations to decide when
+ this makes sense).
+- Limit the number of operations in a ledger that interact with the DEX (ie:
+ any operation that could cross an offer).
+- Reserve some ledger capacity for transactions that are simple payment.
+- Define a few "high priority lanes" that can be easily identified, and one
+ "catch all" lane limited in capacity for unknown transactions.
#### Nomination value comparison
-The order used by the nomination protocol's combine candidate function needs to be modified to support `GeneralizedTransactionSet`.
+The order used by the nomination protocol's combine candidate function needs to
+be modified to support `GeneralizedTransactionSet`.
Values are sorted in lexicographic order by:
-* number of operations (as to favor network throughput) _unchanged_,
-* sum of all fee bids (as to maximize quality of transaction sets) _new_,
-* total fee to collect (as to make sure that bids are actually meaningful; without this sets with constant minimal base fees ignoring any surge pricng could be preferred) _unchanged_,
-* size in bytes of the XDR encoded `GeneralizedTransactionSet` _in decreasing order_ (as to reduce IO resource utilization) _new_,
-* hash as a tie-breaker _unchanged_
+
+- number of operations (as to favor network throughput) _unchanged_,
+- sum of all fee bids (as to maximize quality of transaction sets) _new_,
+- total fee to collect (as to make sure that bids are actually meaningful;
+ without this sets with constant minimal base fees ignoring any surge pricng
+ could be preferred) _unchanged_,
+- size in bytes of the XDR encoded `GeneralizedTransactionSet` _in decreasing
+ order_ (as to reduce IO resource utilization) _new_,
+- hash as a tie-breaker _unchanged_
#### Implications on transaction flooding strategy
Not technically part of the protocol but described here for completeness.
-Changes done at the transaction queue and flooding level should align as much as possible with what happens during [Candidate Value Generation](#candidate-value-generation) as to limit the number of transactions that could get delayed or dropped by nomination later on:
-* limit the number of transactions in memory as to also take advantage of any potential additional limit on certain types of operations.
-* flood higher fee transactions first from the accumulated set.
+Changes done at the transaction queue and flooding level should align as much
+as possible with what happens during
+[Candidate Value Generation](#candidate-value-generation) as to limit the
+number of transactions that could get delayed or dropped by nomination later
+on:
+
+- limit the number of transactions in memory as to also take advantage of any
+ potential additional limit on certain types of operations.
+- flood higher fee transactions first from the accumulated set.
#### `LedgerCloseMeta` update
-The update in `LedgerCloseMeta` will happen together with the switch to the new protocol. This way we don't need to maintain backwards compatibility with legacy `TransactionSet` in `LedgerCloseMetaV1`.
+The update in `LedgerCloseMeta` will happen together with the switch to the new
+protocol. This way we don't need to maintain backwards compatibility with
+legacy `TransactionSet` in `LedgerCloseMetaV1`.
## Design Rationale
-The proposal tries to limit to a minimum the number of things actually enforced at the protocol layer, leaving the door open to more flexibility at the node level.
-
-In turn, this flexibility should allow for faster iteration without having to take a dependency on network wide protocol changes.
+The proposal tries to limit to a minimum the number of things actually enforced
+at the protocol layer, leaving the door open to more flexibility at the node
+level.
-There is no enforcement at the protocol layer on which of fee regimes (discounted vs direct fee) is picked for a given transaction.
+In turn, this flexibility should allow for faster iteration without having to
+take a dependency on network wide protocol changes.
-This changes the contract from a client point of view only slightly:
-the contract between the user and the network is still that the network should try to reduce fees when it can.
+There is no enforcement at the protocol layer on which of fee regimes
+(discounted vs direct fee) is picked for a given transaction.
-The difference is that with this CAP, the user cannot rely on the previous requirement that for "surge pricing" to be active, the ledger had to be at capacity (as per CAP-0005, the fee bid for the entire transaction set was derived from the cheapest transaction included in a full ledger), which limited the risk of paying extremely large fee bids greatly.
-As a consequence, with this CAP, a single transaction can be isolated by a validator and be forced to pay for its fee bid.
+This changes the contract from a client point of view only slightly: the
+contract between the user and the network is still that the network should try
+to reduce fees when it can.
-With the introduction of "fee bump", the need to specify fee bids much higher than recent ledgers was removed even for pre-signed transactions.
+The difference is that with this CAP, the user cannot rely on the previous
+requirement that for "surge pricing" to be active, the ledger had to be at
+capacity (as per CAP-0005, the fee bid for the entire transaction set was
+derived from the cheapest transaction included in a full ledger), which limited
+the risk of paying extremely large fee bids greatly. As a consequence, with
+this CAP, a single transaction can be isolated by a validator and be forced to
+pay for its fee bid.
+With the introduction of "fee bump", the need to specify fee bids much higher
+than recent ledgers was removed even for pre-signed transactions.
## Protocol Upgrade Transition
-As soon as this CAP becomes active, validators will produce `TransactionSetV1` in SCP.
+As soon as this CAP becomes active, validators will produce `TransactionSetV1`
+in SCP.
### Backwards Incompatibilities
-As this CAP does not change transaction semantics, impact on the ecosystem should be minimal.
+As this CAP does not change transaction semantics, impact on the ecosystem
+should be minimal.
-Systems that try to compute transaction fees that are close to market rate will have to be adjusted (by possibly interacting more with a local stellar-core instance) as "ledger capacity" would only be losely related to "local surge pricing".
+Systems that try to compute transaction fees that are close to market rate will
+have to be adjusted (by possibly interacting more with a local stellar-core
+instance) as "ledger capacity" would only be losely related to "local surge
+pricing".
-As users are potentially exposed to higher fees, their bidding strategy can be updated to bid what they're comfortable paying at the time _not counting on any discount_, and gradually increase their bid if their transaction is rejected based on urgency.
+As users are potentially exposed to higher fees, their bidding strategy can be
+updated to bid what they're comfortable paying at the time _not counting on any
+discount_, and gradually increase their bid if their transaction is rejected
+based on urgency.
-The exact strategy for picking the starting bid and increasing transaction bid is outside the scope of this document.
-A possible approach for picking a good starting bid can be to use the fee stats endpoint, adjust it if rejected by core's tx endpoint (that rejects transactions based on fee bid and its local transaction queue state), and after that multiply its fee bid by 10 or 100 after each timeout.
+The exact strategy for picking the starting bid and increasing transaction bid
+is outside the scope of this document. A possible approach for picking a good
+starting bid can be to use the fee stats endpoint, adjust it if rejected by
+core's tx endpoint (that rejects transactions based on fee bid and its local
+transaction queue state), and after that multiply its fee bid by 10 or 100
+after each timeout.
-As transaction sets are transmitted over the network, the overlay network will have to be updated to support the new format.
+As transaction sets are transmitted over the network, the overlay network will
+have to be updated to support the new format.
#### Surge pricing behavior change
-While this CAP doesn't define the candidate transaction set generation strategy, the initial approach would be to just continue using surge pricing approach described in [CAP-0005](cap-0005.md). However, using it requires slightly changing the algorithm: instead of rounding up during the [base fee computation](cap-0005.md#computation-of-the-associated-effective-base-fee) we need to round it down. The reason for doing that is to satisfy the `baseFee` validation requirement of `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` component. With rounding up it's possible to have a valid transaction that doesn't satisfy that requirement. The impact of this change should be minimal.
+While this CAP doesn't define the candidate transaction set generation
+strategy, the initial approach would be to just continue using surge pricing
+approach described in [CAP-0005](cap-0005.md). However, using it requires
+slightly changing the algorithm: instead of rounding up during the
+[base fee computation](cap-0005.md#computation-of-the-associated-effective-base-fee)
+we need to round it down. The reason for doing that is to satisfy the `baseFee`
+validation requirement of `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` component. With
+rounding up it's possible to have a valid transaction that doesn't satisfy that
+requirement. The impact of this change should be minimal.
### Resource Utilization
-This change will require a small increase in CPU and memory utilization which should easily be recovered by the expected decrease in failed transactions propagating on the network during spikes.
+This change will require a small increase in CPU and memory utilization which
+should easily be recovered by the expected decrease in failed transactions
+propagating on the network during spikes.
## Security Concerns
-The changes on the security front are minimal as transaction semantics are not changed.
+The changes on the security front are minimal as transaction semantics are not
+changed.
The biggest change comes from validators having more control over fee policy.
This should not be a problem in practice:
-* transactions will never be charged more than their maximum bid, and people tend to put small multipliers on top of market rate (fees remain low).
-* as validators do not receive fees from transactions processing, there is little incentive for a validator to do this.
-* if certain validators adopt unfair fee policies
- * they will accrue negative reputation on the network. All activity from validators is recorded and archived, also thanks to CAP-0034, the validator that introduced the GeneralizedTransactionSet to the network is recorded.
- * thanks to SCP may cause other network participants to adjust their quorum configurations which could result from those validators to lose trust from the network.
- * standards around fee policies can be established outside of the protocol on expectations around well behaved validators to help with auditing.
-
+
+- transactions will never be charged more than their maximum bid, and people
+ tend to put small multipliers on top of market rate (fees remain low).
+- as validators do not receive fees from transactions processing, there is
+ little incentive for a validator to do this.
+- if certain validators adopt unfair fee policies
+ - they will accrue negative reputation on the network. All activity from
+ validators is recorded and archived, also thanks to CAP-0034, the validator
+ that introduced the GeneralizedTransactionSet to the network is recorded.
+ - thanks to SCP may cause other network participants to adjust their quorum
+ configurations which could result from those validators to lose trust from
+ the network.
+ - standards around fee policies can be established outside of the protocol on
+ expectations around well behaved validators to help with auditing.
## Future work
-The `TransactionPhase` union is an extension point to make it easy to add more "phases" to apply transactions.
+The `TransactionPhase` union is an extension point to make it easy to add more
+"phases" to apply transactions.
-As of this CAP, there is only one such phase: the existing transaction subsystem, and we expect new ones to be introduced over time, such as "Speedex transactions" or "smart contract transactions".
+As of this CAP, there is only one such phase: the existing transaction
+subsystem, and we expect new ones to be introduced over time, such as "Speedex
+transactions" or "smart contract transactions".
This CAP also leaves the door open to different types of components and phases.
Here are a few examples:
-* fee groups can be used to prioritize based on dimensions other than the order book as to rebalance use cases, for example based on the expected number of changes to the ledger.
-* phases could be used to alter the "apply order" policy: phases could be applied in the order they appear, or allow for parallel execution.
-* components could be added to add random numbers/seeds/parameters to update certain ledger entries (like oracles)
-* components could be added to provide information related to network partition/shard (nominators could flood differently based on that)
+
+- fee groups can be used to prioritize based on dimensions other than the order
+ book as to rebalance use cases, for example based on the expected number of
+ changes to the ledger.
+- phases could be used to alter the "apply order" policy: phases could be
+ applied in the order they appear, or allow for parallel execution.
+- components could be added to add random numbers/seeds/parameters to update
+ certain ledger entries (like oracles)
+- components could be added to provide information related to network
+ partition/shard (nominators could flood differently based on that)
## Test Cases
diff --git a/core/cap-0043.md b/core/cap-0043.md
index 1709e6722..07d4b17f3 100644
--- a/core/cap-0043.md
+++ b/core/cap-0043.md
@@ -25,31 +25,32 @@ TBD
## Motivation
This proposal has two motivatons:
-1. The common requirement in financial institutions to use security modules that
-are FIPS approved.
+
+1. The common requirement in financial institutions to use security modules
+ that are FIPS approved.
2. The inability that Stellar account holders have today to use cloud hosted
-HSMs (Hardware Security Modules) to store account keys.
+ HSMs (Hardware Security Modules) to store account keys.
Financial institutions typically require the cryptographic algorithms and
modules they use to appear in approved lists of the latest relevant FIPS
standard, at this time FIPS 140-3 and FIPS 140-2 and their relevant annexes and
-related standards. The US and Canadian governments require FIPS for systems they
-run, operate, or buy. This makes FIPS relevant to corporations that work with
-governments.
+related standards. The US and Canadian governments require FIPS for systems
+they run, operate, or buy. This makes FIPS relevant to corporations that work
+with governments.
Cloud hosted HSMs have made HSMs accessible and they have become popular for
generating and storing keys securely. Cloud HSMs typically support a small
subset of FIPS approved algorithm implementations along with ECDSA secp256k1
which isn’t FIPS approved, but popular due to Bitcoin’s and Ethereum's use and
-broad use in other blockchains. See [Appendix: Cloud Hosted
-HSMs](#appendix-cloud-hosted-hsms).
+broad use in other blockchains. See
+[Appendix: Cloud Hosted HSMs](#appendix-cloud-hosted-hsms).
Stellar supports a single assymetric key type and signing algorithm for
controlling accounts, ed255519. Ed25519 is not included in approved lists of
FIPS 140-2, and there are no known HSMs supporting ed25519 certified as FIPS
compliant. While ed25519 is mentioned in drafts of FIPS 186-5 there is no
-evidence that FIPS approved security modules will arise in the immediate future.
-Ed25519 is not supported by any cloud hosted HSMs today.
+evidence that FIPS approved security modules will arise in the immediate
+future. Ed25519 is not supported by any cloud hosted HSMs today.
There may be other benefits to supporting secp256k1 signing keys for
compatibility with some other blockchains in certain cross-chain protocols, but
@@ -63,7 +64,8 @@ support the use of off-the-shelf security products.
## Abstract
The proposal adds ECDSA keys as an option to use as signers of accounts. The
-proposal adds includes support for the NIST P-256 curve and the secp256k1 curve.
+proposal adds includes support for the NIST P-256 curve and the secp256k1
+curve.
This proposal makes it possible for a Stellar account holder to store their
account key in FIPS certified modules, and/or in cloud hosted HSMs.
@@ -77,7 +79,9 @@ utilize a HSM.
### XDR Changes
-This patch of XDR changes is based on the XDR files in commit (`394b9413180969e2035e19742194d9c04c5bf5d9`) of stellar-core.
+This patch of XDR changes is based on the XDR files in commit
+(`394b9413180969e2035e19742194d9c04c5bf5d9`) of stellar-core.
+
```diff mddiffcheck.base=394b9413180969e2035e19742194d9c04c5bf5d9
diff --git a/src/xdr/Stellar-types.x b/src/xdr/Stellar-types.x
index 8f7d5c20..9394ada7 100644
@@ -102,7 +106,7 @@ index 8f7d5c20..9394ada7 100644
+ SIGNER_KEY_TYPE_ECDSA_P256 = KEY_TYPE_ECDSA_P256,
+ SIGNER_KEY_TYPE_ECDSA_SECP256K1 = KEY_TYPE_ECDSA_SECP256K1
};
-
+
union PublicKey switch (PublicKeyType type)
@@ -52,6 +57,16 @@ case SIGNER_KEY_TYPE_PRE_AUTH_TX:
case SIGNER_KEY_TYPE_HASH_X:
@@ -119,7 +123,7 @@ index 8f7d5c20..9394ada7 100644
+ uint256 y;
+ } ecdsaSecp256k1;
};
-
+
// variable size as the size depends on the signature scheme used
@@ -80,4 +95,16 @@ struct HmacSha256Mac
{
@@ -171,9 +175,10 @@ the parts of the protocol that are required to enable signing keys.
The proposal does not add support for the new key types as account identifiers,
because that would be unnecessary. Any Stellar account holder who wishes to
-control a Stellar account with a new key type can do so by changing the signers.
-Also, a change to account identifiers would have a substantially larger
-downstream impact on Horizon, SDKs, wallets, and other ecosystem applications.
+control a Stellar account with a new key type can do so by changing the
+signers. Also, a change to account identifiers would have a substantially
+larger downstream impact on Horizon, SDKs, wallets, and other ecosystem
+applications.
The proposal does not add support for the new key types as node identifiers,
because that would be unnecessary for the motivation of the proposal. If a need
@@ -186,13 +191,13 @@ per key, because decompressing the key requires solving y^2 = x^3 - 3x + b and
finding the square-root of a value. This is a trade-off between ledger storage
space and the CPU time a validator must expend to verify a signature. It would
not be ideal for validators, as part of verifying signatures, to need to
-decompress the key each time. Compressed form would not reduce the size of ECDSA
-signatures, and so would have no impact on the largest dimension of scale that
-the network has, transactions. The choice to use uncompressed form in the XDR
-does not limit whether the strkey definition uses compressed form, and so the
-use of uncompressed form in the protocol does not impact the UX. See [Appendix:
-Compressed vs Uncompressed
-Benchmark](#appendix-compressed-vs-uncompressed-benchmark) for an example.
+decompress the key each time. Compressed form would not reduce the size of
+ECDSA signatures, and so would have no impact on the largest dimension of scale
+that the network has, transactions. The choice to use uncompressed form in the
+XDR does not limit whether the strkey definition uses compressed form, and so
+the use of uncompressed form in the protocol does not impact the UX. See
+[Appendix: Compressed vs Uncompressed Benchmark](#appendix-compressed-vs-uncompressed-benchmark)
+for an example.
The raw r and s points that form the ECDSA signature are stored as is, as this
is the most raw and convenient format. Some standard libraries may prefer to
@@ -207,20 +212,20 @@ curves are added in the future their points lengths may be different.
The names for the new signer types, `SIGNER_KEY_TYPE_ECDSA_P256` and
`SIGNER_KEY_TYPE_ECDSA_SECP256K1`, were selected to lean on the most common
-terms used to refer to the algorithm and curve types. The most consistent naming
-scheme would be to name the former `_ECDSA_SECP256R1`, but that name would
-probably not be easily recognized by many who encounter it.
+terms used to refer to the algorithm and curve types. The most consistent
+naming scheme would be to name the former `_ECDSA_SECP256R1`, but that name
+would probably not be easily recognized by many who encounter it.
## Protocol Upgrade Transition
As soon as this CAP becomes active, Stellar accounts may have the new signer
types. Any application, such as Horizon, that performs analysis on the signers
of accounts may encounter the new types. Any application that performs analysis
-on transactions signed by the new types, or transactions that use a SetOptionsOp
-with the new types, may encounter the new types.
+on transactions signed by the new types, or transactions that use a
+SetOptionsOp with the new types, may encounter the new types.
-An accompanying change will be required to [SEP-23 Strkeys] to add new algorithm
-discrimants.
+An accompanying change will be required to [SEP-23 Strkeys] to add new
+algorithm discriminants.
### Backwards Incompatibilities
@@ -232,19 +237,20 @@ This proposal will require Stellar validators to verify ECDSA signatures.
This proposal may introduce some change in performance characteristics due to
the CPU utilization differences between the ECDSA and EdDSA (ed25519)
-algorithms. Ed25519 has assembly optimized implementations in multiple langauges
-and is typically considered to be faster than ECDSA. These differences are
-difficult to comment on generally as it depends on CPU architecture and the
-implementation in use.
+algorithms. Ed25519 has assembly optimized implementations in multiple
+languages and is typically considered to be faster than ECDSA. These
+differences are difficult to comment on generally as it depends on CPU
+architecture and the implementation in use.
The reference implementation of the Stellar protocol, stellar-core, uses
libsodium. Libsodium uses optimized vector instructions and 128-bit arithmetic
and is considered to be an optimal implementation for ed25519 on amd64/x86.
-Simiarily optimized versions of ECDSA in OpenSSL may be able to verify only 0.5x
-the signatures as libsodium.
+Similarly optimized versions of ECDSA in OpenSSL may be able to verify only
+0.5x the signatures as libsodium.
Related material:
+
- https://monocypher.org/speed
- https://essay.utwente.nl/75354/1/DNSSEC%20curves.pdf
@@ -264,6 +270,7 @@ TBD
The following cloud hosted HSMs were reviewed when assessing the motivation of
the proposal.
+
- [Microsoft Key Vault](https://docs.microsoft.com/en-us/azure/key-vault/keys/about-keys)
- [Google Cloud Key Management Service](https://cloud.google.com/kms/docs/algorithms)
- [AWS Key Management Service](https://docs.aws.amazon.com/kms/latest/cryptographic-details/crypto-primitives.html)
diff --git a/core/cap-0044.md b/core/cap-0044.md
index e8f42bfc9..91aa6cf32 100644
--- a/core/cap-0044.md
+++ b/core/cap-0044.md
@@ -14,31 +14,36 @@ Protocol version: TBD
```
## Simple Summary
+
Provide validators the ability to configure SPEEDEX.
## Working Group
+
This proposal is based on an earlier draft written by Geoff Ramseyer, which
Nicolas Barry has also contributed to.
## Motivation
+
SPEEDEX requires two kinds of configuration: the set of assets comprising the
market, and the parameters used by the built-in solver. The set of assets must
-be configurable because the time to compute a solution increases with the number
-of assets, so we cannot compute a solution for all assets. The parameters must
-be configurable because different parameters may work better during different
-market regimes.
+be configurable because the time to compute a solution increases with the
+number of assets, so we cannot compute a solution for all assets. The
+parameters must be configurable because different parameters may work better
+during different market regimes.
### Goals Alignment
+
This proposal supports the development of SPEEDEX on Stellar, which in turn
supports the Stellar Network Goals
-- The Stellar Network should run at scale and at low cost to all participants of
-the network.
+- The Stellar Network should run at scale and at low cost to all participants
+ of the network.
- The Stellar Network should enable cross-border payments, i.e. payments via
-exchange of assets, throughout the globe, enabling users to make payments
-between assets in a manner that is fast, cheap, and highly usable.
+ exchange of assets, throughout the globe, enabling users to make payments
+ between assets in a manner that is fast, cheap, and highly usable.
## Abstract
+
This proposal introduces `SpeedexConfigurationEntry`, a new type of
`LedgerEntry`, that tracks the assets that can be traded in SPEEDEX and the
solver configuration for computing the prices.
@@ -46,6 +51,7 @@ solver configuration for computing the prices.
## Specification
### XDR changes
+
This patch of XDR changes is based on the XDR files in commit
(`a211531148f13ab38725bc176630793d657b7f88`) of stellar-core.
@@ -57,7 +63,7 @@ index c870fe09..cb91c48d 100644
@@ -473,6 +473,31 @@ struct LiquidityPoolEntry
body;
};
-
+
+enum SpeedexSolutionComparisonHeuristic
+{
+ PRICE_WEIGHTED_SQUARED_L2_NORM = 0
@@ -94,7 +100,7 @@ index c870fe09..cb91c48d 100644
+ SpeedexConfigurationEntry speedexConfiguration;
}
data;
-
+
@@ -557,6 +584,9 @@ case LIQUIDITY_POOL:
{
PoolID liquidityPoolID;
@@ -103,7 +109,7 @@ index c870fe09..cb91c48d 100644
+case SPEEDEX_CONFIGURATION:
+ void;
};
-
+
// list of all envelope types used in the application
diff --git a/src/xdr/Stellar-ledger.x b/src/xdr/Stellar-ledger.x
index 84b84cbf..141c0a70 100644
@@ -119,7 +125,7 @@ index 84b84cbf..141c0a70 100644
+ LEDGER_UPGRADE_REMOVE_SPEEDEX_ASSET = 7,
+ LEDGER_UPGRADE_SPEEDEX_TATONNEMENT_CONFIGURATION = 8
};
-
+
union LedgerUpgrade switch (LedgerUpgradeType type)
@@ -137,6 +140,12 @@ case LEDGER_UPGRADE_BASE_RESERVE:
uint32 newBaseReserve; // update baseReserve
@@ -132,12 +138,13 @@ index 84b84cbf..141c0a70 100644
+case LEDGER_UPGRADE_SPEEDEX_SOLVER_CONFIGURATION:
+ SpeedexSolverConfiguration speedexNewConfig; // update solver configuration
};
-
+
/* Entries used to define the bucket list */
```
### Semantics
+
During the protocol upgrade, a `SpeedexConfigurationEntry` will be initialized
with an empty list of assets and a reasonable default solver configuration. The
default solver configuration will be detailed when the actual parameters are
@@ -146,25 +153,29 @@ specified.
There is only ever one `SpeedexConfigurationEntry`.
### The set of assets
+
`SpeedexConfigurationEntry.asset` is an ordered list of assets, using the same
ordering as CAP-0038. When an asset is added to the set, it must be inserted in
the appropriate location. The set can be modified by the upgrades
`LEDGER_UPGRADE_SPEEDEX_ADD_ASSET` and `LEDGER_UPGRADE_SPEEDEX_REMOVE_ASSET`.
### The add/remove asset upgrades
+
The upgrade `LEDGER_UPGRADE_SPEEDEX_ADD_ASSET` is valid if the asset is valid,
-the asset is not a pool share, and the asset is not currently a SPEEDEX asset.
+the asset is not a pool share, and the asset is not currently a SPEEDEX asset.
The upgrade `LEDGER_UPGRADE_SPEEDEX_REMOVE_ASSET` is valid if the asset is
currently a SPEEDEX asset (which implies that the asset is valid, and that the
asset is not a pool share).
### The solver configuration upgrade
+
The upgrade `LEDGER_UPGRADE_SPEEDEX_SOLVER_CONFIGURATION` is valid if the
-configuration is valid. The validity conditions will be detailed when the actual
-parameters are specified.
+configuration is valid. The validity conditions will be detailed when the
+actual parameters are specified.
## Design Rationale
+
SPEEDEX has a number of control parameters determining how to run the solver
(current default solver is Tatonnement). Most of these parameters can take any
value in a reasonable range and produce a reasonably good result. However,
@@ -189,23 +200,24 @@ to handle smaller smoothness multipliers. As such, Stellar may want to adjust
the smoothness multiplier in response to network traffic.
### Alternative configuration mechanisms
+
A number of different approaches to SPEEDEX configuration were considered, in
addition to this proposal:
- validators propose the entire configuration, including the set of assets,
-during nomination
-- validators propose the solver configuration, but not the set of assets, during
-nomination
+ during nomination
+- validators propose the solver configuration, but not the set of assets,
+ during nomination
The first proposal would be the SPEEDEX equivalent of anarchy. The market would
-be different in every ledger, and people may not be able to trade when they want
-to depending on which node nominates.
+be different in every ledger, and people may not be able to trade when they
+want to depending on which node nominates.
-The second proposal is more palatable, but is still worse than this proposal. In
-order for SPEEDEX to be robust to the nomination of a pathological solver
+The second proposal is more palatable, but is still worse than this proposal.
+In order for SPEEDEX to be robust to the nomination of a pathological solver
configuration, it must run a backup computation with a more trustworthy
-configuration. But how should that configuration be chosen? It could be fixed or
-chosen via upgrades. If it is fixed, then it won't be robust to changes in
+configuration. But how should that configuration be chosen? It could be fixed
+or chosen via upgrades. If it is fixed, then it won't be robust to changes in
market regime. If it is chosen via upgrades, then it reduces to this proposal.
With this proposal, the solver configuration _is_ for the backup computation.
@@ -213,6 +225,7 @@ But instead of nominating a solver configuration, nodes nominate a complete set
of candidate prices (not detailed in this proposal).
### All SPEEDEX assets in a single ledger entry
+
This proposal stores all SPEEDEX assets in a single ledger entry. Initially, I
had favored a design where every SPEEDEX asset has its own ledger entry because
it keeps the ledger entry size bounded. But every node needs to know the full
@@ -222,16 +235,21 @@ necessitate loading all of the SPEEDEX asset ledger entries anyway.
## Protocol Upgrade Transition
### Backwards Incompatibilities
+
This proposal does not introduce any backwards incompatibilities.
### Resource Utilization
+
This proposal does not change resource utilization.
## Security Concerns
+
This proposal does not introduce any security concerns.
## Test Cases
+
None yet.
## Implementation
+
None yet.
diff --git a/core/cap-0045.md b/core/cap-0045.md
index e94d1d2ab..99ce72673 100644
--- a/core/cap-0045.md
+++ b/core/cap-0045.md
@@ -14,27 +14,37 @@ Protocol version: TBD
```
## Simple Summary
+
Describe how [SPEEDEX](https://arxiv.org/abs/2111.02719) prices will be
computed.
## Working Group
+
This proposal is based on an earlier draft written by Geoff Ramseyer, to which
Nicolas Barry has also contributed.
## Motivation
-In order for SPEEDEX to facilitate efficient and commutative trades over a set of assets across multiple market participants (orders, liquidity pools), a set of prices (one for each asset) need to be computed. The prices are chosen to minimize a heuristic objective function which ensures equilibrium between demands and endowments over the entire asset set. This CAP outlines the methodology by which the objective is computed and prices are computed.
+
+In order for SPEEDEX to facilitate efficient and commutative trades over a set
+of assets across multiple market participants (orders, liquidity pools), a set
+of prices (one for each asset) needs to be computed. The prices are chosen to
+minimize a heuristic objective function which ensures equilibrium between
+demands and endowments over the entire asset set. This CAP outlines the
+methodology by which the objective is computed and prices are computed.
### Goals Alignment
+
This proposal supports the development of SPEEDEX on Stellar, which in turn
supports the Stellar Network Goals
-- The Stellar Network should run at scale and at low cost to all participants of
-the network.
+- The Stellar Network should run at scale and at low cost to all participants
+ of the network.
- The Stellar Network should enable cross-border payments, i.e. payments via
-exchange of assets, throughout the globe, enabling users to make payments
-between assets in a manner that is fast, cheap, and highly usable.
+ exchange of assets, throughout the globe, enabling users to make payments
+ between assets in a manner that is fast, cheap, and highly usable.
## Abstract
+
This proposal describes two methods by which SPEEDEX prices are computed, and a
method for choosing between these two methods. The first method takes advantage
of `GeneralizedTransactionSet` introduced in CAP-0042 to endow validators with
@@ -45,6 +55,7 @@ to run an iterative solver.
## Specification
### XDR changes
+
This patch of XDR changes is based on the XDR files in commit
(`90dbf13c483a4c126741a0e5ad0a222fb51ff299`) of stellar-core.
@@ -56,7 +67,7 @@ index 84b84cbf..ec66ffb0 100644
@@ -176,14 +176,35 @@ case METAENTRY:
BucketMetadata metaEntry;
};
-
+
-// Transaction sets are the unit used by SCP to decide on transitions
-// between ledgers
struct TransactionSet
@@ -64,7 +75,7 @@ index 84b84cbf..ec66ffb0 100644
Hash previousLedgerHash;
TransactionEnvelope txs<>;
};
-
+
+struct TransactionSetV1
+{
+ Hash previousLedgerHash;
@@ -94,7 +105,7 @@ index 84b84cbf..ec66ffb0 100644
@@ -203,11 +224,13 @@ struct TransactionHistoryEntry
uint32 ledgerSeq;
TransactionSet txSet;
-
+
- // reserved for future use
+ // when v != 0, txSet must be empty
union switch (int v)
@@ -109,7 +120,7 @@ index 84b84cbf..ec66ffb0 100644
@@ -358,9 +381,30 @@ struct LedgerCloseMetaV0
SCPHistoryEntry scpInfo<>;
};
-
+
+struct LedgerCloseMetaV1
+{
+ LedgerHeaderHistoryEntry ledgerHeader;
@@ -141,37 +152,41 @@ index 84b84cbf..ec66ffb0 100644
```
### Semantics
+
SPEEDEX pricing has two phases:
- The first phase takes place before consensus. Different validators may use
-different implementations, and these implementations are free to make use of
-any techniques including nondeterminism and parallelism.
+ different implementations, and these implementations are free to make use of
+ any techniques including nondeterminism and parallelism.
- The second phase takes place during transaction application. Every validator
-must implement the same semantics, and these semantics must be deterministic.
+ must implement the same semantics, and these semantics must be deterministic.
The prices ultimately used to settle trades will be either the prices from the
first phase, or the prices from the second phase. All nodes must use the same
heuristic to determine which prices are closer to market.
#### GeneralizedTransactionSet
+
This proposal takes advantage of `GeneralizedTransactionSet` introduced in
CAP-0042. Refer to that proposal to understand how `GeneralizedTransactionSet`
interacts with `LedgerHeader`.
#### The first phase
+
Because the first phase occurs before consensus, this proposal can only specify
what constitutes valid output. The `GeneralizedTransactionSet` is valid if
- `GeneralizedTransactionSet.v() == 1`
- `GeneralizedTransactionSet.v1TxSet().previousLedgerHash` is the hash of the
-previous ledger
+ previous ledger
- every transaction in `GeneralizedTransactionSet.v1TxSet().txs` is valid
- `GeneralizedTransactionSet.v1TxSet().prices.size() == 0` or
-`GeneralizedTransactionSet.v1TxSet().prices.size() == SpeedexConfigurationEntry.assets.size()`
+ `GeneralizedTransactionSet.v1TxSet().prices.size() == SpeedexConfigurationEntry.assets.size()`
- every price in `GeneralizedTransactionSet.v1TxSet().prices` is strictly
-positive
+ positive
#### The second phase
+
Because the second phase occurs during transaction application, this proposal
fully specifies the output.
@@ -184,9 +199,10 @@ which we will describe independently.
The first component of tatonnement is the demand aggregator, which determines
the total amount of an asset demanded by all market participants at given
prices. The demand aggregator sums the demand for every asset over liquidity
-pools and orders.
+pools and orders.
A liquidity pool holding assets `X` and `Y` has demand
+
```
X' = { (pX + Y) (1 - F) / (p * (2 - F)) : p < (1 - F) Y/X
{ (pX + Y) / (p * (2 - F)) : 1/p < (1 - F) X/Y
@@ -196,9 +212,12 @@ Y' = { (pX + Y) / (2 - F) : p < (1 - F) Y/X
{ (pX + Y) (1 - F) / (2 - F) : 1/p < (1 - F) X/Y
{ Y : otherwise
```
-where `p = p(X) / p(Y)` is the market price ratio. `F` is the fee ratio required by the liquidity pool.
+
+where `p = p(X) / p(Y)` is the market price ratio. `F` is the fee ratio
+required by the liquidity pool.
An offer selling `X` for `Y` at a minimum price ratio of `P` has demand
+
```
X' = { 0 : P / (1 - S) <= p
{ X - (p - P) X / (S * p) : P < p < P / (1 - S)
@@ -208,18 +227,22 @@ Y' = { pX : P / (1 - S) <= p
{ (p - P) X / S : P < p < P / (1 - S)
{ 0 : p <= P
```
+
where `p = p(X) / p(Y)` is the market price ratio, and
`S = 2^(-SpeedexConfigurationEntry.solverConfig.smoothness)`.
The second component of tatonnement is the heuristic objective function. A step
should only be taken if the step size is minimal or the objective function
-decreases. The heuristic function is the squared l2 norm of price-weighted excess demand of all assets, and is configurable via
-`SpeedexSolverConfiguration.SpeedexSolutionComparisonHeuristic`. The excess demand for an asset is defined as the sum of
-all demands from liquidity pools and orders for that asset minus the sum of all
-endowments from liquidity pools and orders for that asset.
+decreases. The heuristic function is the squared l2 norm of price-weighted
+excess demand of all assets, and is configurable via
+`SpeedexSolverConfiguration.SpeedexSolutionComparisonHeuristic`. The excess
+demand for an asset is defined as the sum of all demands from liquidity pools
+and orders for that asset minus the sum of all endowments from liquidity pools
+and orders for that asset.
The third component of tatonnement is the actual solver. The following is a
pseudocode implementation of the solver:
+
```
// Input:
// numAssets is the number of assets (uint32_t)
@@ -284,22 +307,28 @@ for i in 0..numIterations
// Reduce the step size so future steps will try to reduce the objective
invStepSize *= stepSizeFactor
```
-There are several places that this pseudocode can overflow, which is discussed in the [Possible places of overflow](#possible-places-of-overflow) section and future versions
-of this proposal will explicitly handle that.
+
+There are several places that this pseudocode can overflow, which is discussed
+in the [Possible places of overflow](#possible-places-of-overflow) section and
+future versions of this proposal will explicitly handle that.
#### Choosing between the prices
-If the first phase does not propose a price, i.e. `GeneralizedTransactionSet.v1TxSet().prices.size() == 0`,
-then the prices from the second phase will be used.
-Otherwise, the prices from the first phase and from the second phase will be evaluated
-using the heuristic objective function also used in tatonnement (see the second
-component of tatonnement above). If the heuristic values are different, then
-whichever prices produce a lower value will be used. If the heuristic values
-are the same, then the prices from the second phase will be used.
+If the first phase does not propose a price, i.e.
+`GeneralizedTransactionSet.v1TxSet().prices.size() == 0`, then the prices from
+the second phase will be used.
+
+Otherwise, the prices from the first phase and from the second phase will be
+evaluated using the heuristic objective function also used in tatonnement (see
+the second component of tatonnement above). If the heuristic values are
+different, then whichever prices produce a lower value will be used. If the
+heuristic values are the same, then the prices from the second phase will be
+used.
## Design Rationale
### Two-phase pricing
+
Computing prices during before and after consensus maximizes the flexibility of
the protocol. Before consensus, validators have a great deal of flexibility to
use parallelism, randomness, outside data sources like the price stream from an
@@ -313,30 +342,52 @@ could, for example, be generated by searching for a good configuration during
nomination or using outside data sources. But this is a very indirect approach;
why nominate the configuration when you can simply nominate the result. This
approach also doesn't avoid the need to perform tatonnement after validation,
-because a byzantine validator could nominate a bad configuration. Therefore this
-approach is also less performant than what is proposed in this CAP because every
-node must do tatonnement twice instead of once.
+because a byzantine validator could nominate a bad configuration. Therefore
+this approach is also less performant than what is proposed in this CAP because
+every node must do tatonnement twice instead of once.
### Empty set of prices is valid in the first phase
+
The first phase is optional because a validator that does not want to perform
-any calculations could always choose random prices. If we tolerate random prices,
-then we should acknowledge that no prices at all is probably just as useful.
+any calculations could always choose random prices. If we tolerate random
+prices, then we should acknowledge that no prices at all is probably just as
+useful.
### Demand from a liquidity pool
+
Consider a liquidity pool holding assets `X` and `Y` (we will also use `X` and
-`Y` to denote the reserves of those assets). The market price ratio between the two assets is defined as:
+`Y` to denote the reserves of those assets). The market price ratio between the
+two assets is defined as:
+
+$$ -->
-where and are prices of `X` and `Y` respectively, in arbitrary price unit. and are the infinitesimal quantities of `X` and `Y` that can be exchanged for each other.
+where
+
+and
+
+are prices of `X` and `Y` respectively, in arbitrary price unit.
-For a constant-product liquidity pool (the only type this proposal is concerned about), the constant-product invariant requires that any transaction within the pool must not reduce the the product of the two reserves (i.e. liquidity).
+
+
+and
+
+are the infinitesimal quantities of `X` and `Y` that can be exchanged for each
+other.
-The liquidity pool starts at equilibrium point `E`, with reserve amounts `X` and `Y` respectively.
+For a constant-product liquidity pool (the only type this proposal is concerned
+about), the constant-product invariant requires that any transaction within the
+pool must not reduce the product of the two reserves (i.e. liquidity).
-Consider the scenario where the market decides on a lower price for `X` relative to `Y` than what is manifested in the current equilibrium `E`, the liquidity pool will buy `X` and sell `Y` (at the market rate) until a new equilibrium is established (call it `E’`) (see figure below).
+The liquidity pool starts at equilibrium point `E`, with reserve amounts `X`
+and `Y` respectively.
+
+Consider the scenario where the market decides on a lower price for `X`
+relative to `Y` than what is manifested in the current equilibrium `E`, the
+liquidity pool will buy `X` and sell `Y` (at the market rate) until a new
+equilibrium is established (call it `E’`) (see figure below).

@@ -344,18 +395,32 @@ From the constant-product invariant, the pool will buy X sell Y as long as :
-where and are infinitesimal amount of `X` and `Y` to trade, whose relative price ratio is set by the market. `F` the liquidity pool's fee ratio. Expanding the above equation, rearranging, and dividing both sides by :
+where
+
+and
+
+are infinitesimal amounts of `X` and `Y` to trade, whose relative price ratio is
+set by the market. `F` is the liquidity pool's fee ratio. Expanding the above
+equation, rearranging, and dividing both sides by
+
+:
-Taking and using the price definition, we get:
+Taking
+
+and using the price definition, we get:
Rearranging:
+
-In other words, as long as the external market price of `X` is less than or equal to the ratio, the liquidity pool will keep buying `X` and selling `Y` (thus reducing the ratio). The equilibrium is again reached when the inequality becomes equality:
+In other words, as long as the external market price of `X` is less than or
+equal to the ratio, the liquidity pool will keep buying `X` and selling `Y`
+(thus reducing the ratio). The equilibrium is again reached when the inequality
+becomes equality:
@@ -363,115 +428,185 @@ For an Arrow-Debreu market, the budget-constraint must also hold:
-
-Solving the budget constraint and marginal price constraint together, we get the demand for `X` and `Y` when
+Solving the budget constraint and marginal price constraint together, we get
+the demand for `X` and `Y` when
+
:
+
-Observe that in this case,
-
+Observe that in this case,
+
-and
+and
+
Thus, the liquidity pool, as expected, sells `Y` and purchases `X`.
-In the reverse scenario, when the market decides on a higher price of `X` relative to `Y`, the pool needs to sell `X` and purchase `Y` to reach new equilibrium `E'` (figure below).
+In the reverse scenario, when the market decides on a higher price of `X`
+relative to `Y`, the pool needs to sell `X` and purchase `Y` to reach new
+equilibrium `E'` (figure below).

-By the exact same notion, when , the demand for `X` and `Y` are:
+By the exact same notion, when
+,
+the demand for `X` and `Y` are:
+
-Observe in this case, and
+Observe in this case,
+
+and
+
Thus, the liquidity pool, as expected, sells `X` and purchases `Y`.
-When the price is in between, i.e. , no trade will happen and both reserves stay the same.
+When the price is in between, i.e.
+,
+no trade will happen and both reserves stay the same.
### Demand from an order
-Tatonnement does not handle discontinuities well. The demand from an order is
-intrinsically discontinuous, so a natural solution is to smooth the demand. This
-can be achieved by linearly interpolating the price-weighted demand between the case where the order does not execute and the case where the order does execute.
-
-It doesn't matter whether you linearly interpolate the asset sold or the asset bought. The two quantities are linearly related according to the budget
-constraint , so linear interpolation of one asset implies linear interpolation of the other.
-To fully illustrate the demand interpolation heuristics, consider an offer selling asset `X` (we also use `X` to denote the amount of asset `X` being offered) for asset `Y` at limit price . At any point, the budget must be conserved:
+Tatonnement does not handle discontinuities well. The demand from an order is
+intrinsically discontinuous, so a natural solution is to smooth the demand.
+This can be achieved by linearly interpolating the price-weighted demand
+between the case where the order does not execute and the case where the order
+does execute.
+
+It doesn't matter whether you linearly interpolate the asset sold or the asset
+bought. The two quantities are linearly related according to the budget
+constraint
+,
+so linear interpolation of one asset implies linear interpolation of the other.
+
+To fully illustrate the demand interpolation heuristics, consider an offer
+selling asset `X` (we also use `X` to denote the amount of asset `X` being
+offered) for asset `Y` at limit price
+.
+At any point, the budget must be conserved:
-Define , the price ratio between `X` and `Y` (`p` is also equivalent to the price of `X` when `Y` is the denominated asset), and ignore the fee component `F` (we do not collect fees from the orders), we get:
+Define
+,
+the price ratio between `X` and `Y` (`p` is also equivalent to the price of `X`
+when `Y` is the denominated asset), and ignore the fee component `F` (we do not
+collect fees from the orders), we get:
+
+$$ -->
-
The order cannot execute below its limit price. We designate some price above
-its limit price at which the order executes fully. The ratio between this
-price and the limit price is a function of the smoothness parameter .
-Specifically, .
-
-When , no trade will happen, demand for `Y` is `0`, and demand for `X` is the full amount of `X` valued at .
+its limit price at which the order executes fully. The ratio between this price
+
+and the limit price
+
+is a function of the smoothness parameter
+.
+Specifically,
+.
+
+When
+,
+no trade will happen, demand for `Y` is `0`, and demand for `X` is the full
+amount of `X` valued at
+.
+
+When
+,
+the entire amount `X` is traded for `Y`, thus demand for `Y` is
+
+
+
+,
+and demand for `X` is `0`.
+
+For
+,
+the demands are linearly interpolated from the end points (see figure below):
-When , the entire amount `X` is traded for `Y`, thus demand for `Y` is , and demand for `X` is `0`.
-
-For , the demands are linearly interpolated from the end points (see figure below):
+$$ -->

-Substituting and and , rearrange, we get when the demand for `Y` is:
+Substituting
+
+and
+
+and
+,
+rearrange, we get when
+
+the demand for `Y` is:
+
+$$ -->
The amount of `X` sold for `Y` is:
+
+$$ -->
-
And finally the demand for `X` is:
+
+$$ -->
### Possible places of overflow
-As mentioned before, there are multiple places in the tatonnement process where overflowing an `int64_t` can occur:
-- Endowment: the total endowment of an asset is calculated by summing over endowments from all orders and liquidity pools of that asset.
-- Demand from an offer and liquidity pool: both offer and liquidity pool demand formulas contain `p*X` terms, where `p` is the price (`int64_t`) and `X` is the offer amount (`int64_t`) or the liquidity pool reserve amount (`int64_t`).
-- `step`: calculating the price update step involves multiplying the price (`int64_t`) by the net demand (`int64_t`).
-- The objective heuristics: the objective takes the squared l2-norm of price weighted demand, which has upper bound of `2 * (64 + 64) + 32 = 288 bits`.
+
+As mentioned before, there are multiple places in the tatonnement process where
+overflowing an `int64_t` can occur:
+
+- Endowment: the total endowment of an asset is calculated by summing over
+ endowments from all orders and liquidity pools of that asset.
+- Demand from an offer and liquidity pool: both offer and liquidity pool demand
+ formulas contain `p*X` terms, where `p` is the price (`int64_t`) and `X` is
+ the offer amount (`int64_t`) or the liquidity pool reserve amount
+ (`int64_t`).
+- `step`: calculating the price update step involves multiplying the price
+ (`int64_t`) by the net demand (`int64_t`).
+- The objective heuristics: the objective takes the squared l2-norm of price
+ weighted demand, which has upper bound of `2 * (64 + 64) + 32 = 288 bits`.
## Protocol Upgrade Transition
### Backwards Incompatibilities
+
This proposal does not introduce any backwards incompatibilities.
### Resource Utilization
+
This proposal increases resource utilization. Validators nominating values will
need to compute prices prior to consensus. After consensus, all validators will
-need to use tatonnement to compute prices as well. The part before consensus can
-be scaled to the capacity of the validator, but the part after consensus will
-have to seek a balance between accuracy and the capacity of the weakest
+need to use tatonnement to compute prices as well. The part before consensus
+can be scaled to the capacity of the validator, but the part after consensus
+will have to seek a balance between accuracy and the capacity of the weakest
validators.
## Security Concerns
+
This proposal does not introduce any security concerns.
## Test Cases
+
None yet.
## Implementation
+
None yet.
diff --git a/core/cap-0046-01.md b/core/cap-0046-01.md
index 911cbe65b..6a7af96c2 100644
--- a/core/cap-0046-01.md
+++ b/core/cap-0046-01.md
@@ -15,17 +15,31 @@ Protocol version: 20
## Simple Summary
-This CAP specifies the lowest-level **code execution** and **data model** aspects of a WebAssembly-based (Wasm) "smart contract" system for the Stellar network, called Soroban. Wasm smart contract code runs as a **guest** inside of a virtual machine (VM) which is embedded in a **host** environment.
-
-Higher-level components of a smart contract system such as ledger entries, host objects and host functions, and transactions to manage and invoke contracts will be specified in additional CAPs. This CAP focuses only on the lowest-level components.
-
-No new operations or ledger entries are introduced in this CAP. Nothing observably changes in the protocol available to users. This CAP is best understood as a set of building blocks for later CAPs, introducing a vocabulary of concepts, data types and implementation components.
-
-The design in this CAP is derived from a working and much more complete prototype that includes much that is left out of this CAP. This CAP is being proposed separately to facilitate early discussion of the building blocks, and to help decompose the inevitably-large volume of interrelated changes required for a complete smart contract system into smaller, more understandable pieces.
+This CAP specifies the lowest-level **code execution** and **data model**
+aspects of a WebAssembly-based (Wasm) "smart contract" system for the Stellar
+network, called Soroban. Wasm smart contract code runs as a **guest** inside of
+a virtual machine (VM) which is embedded in a **host** environment.
+
+Higher-level components of a smart contract system such as ledger entries, host
+objects and host functions, and transactions to manage and invoke contracts
+will be specified in additional CAPs. This CAP focuses only on the lowest-level
+components.
+
+No new operations or ledger entries are introduced in this CAP. Nothing
+observably changes in the protocol available to users. This CAP is best
+understood as a set of building blocks for later CAPs, introducing a vocabulary
+of concepts, data types and implementation components.
+
+The design in this CAP is derived from a working and much more complete
+prototype that includes much that is left out of this CAP. This CAP is being
+proposed separately to facilitate early discussion of the building blocks, and
+to help decompose the inevitably-large volume of interrelated changes required
+for a complete smart contract system into smaller, more understandable pieces.
## Working Group
-This protocol change was authored by Graydon Hoare, with input from the consulted individuals mentioned at the top of this document.
+This protocol change was authored by Graydon Hoare, with input from the
+consulted individuals mentioned at the top of this document.
## Motivation and Goals Alignment
@@ -35,53 +49,105 @@ See the Soroban overview CAP.
### Primary requirements
-The primary requirement for any smart contract system is to enable, within certain parameters, arbitrary new functionality to be added to a blockchain's state-transition function _by users_. This can be further decomposed to two requirements of concern in this CAP:
-
- 1. Code: stellar-core's state transition function must be extended with some means of executing, within parameters, some form of user-provided Turing-complete instruction code. Preferably in a compact form that can be stored within the ledger.
-
- 2. Data: stellar-core's model of data -- comprising transaction input, output, persistent state and temporary working memory -- must be extended to include data of concern to smart contracts: their input, output, persistent state, and temporary working memory during execution. Any transformations between each of these sorts of data must be specified, even if partially delegated to contract logic.
-### Required parameters to mitigate risks
-
-While the primary requirements seem simple enough to meet -- "just add a VM" -- there are many risks associated with a naive implementation. Therefore subsequent requirements take the form of parameters that constrain implementations in order to mitigate risks, including:
+The primary requirement for any smart contract system is to enable, within
+certain parameters, arbitrary new functionality to be added to a blockchain's
+state-transition function _by users_. This can be further decomposed to two
+requirements of concern in this CAP:
- 3. Secure: Soroban should be secure against benign or malicious smart contract code as well as contract-code input that could imperil system availability, integrity, or confidentiality (in the few cases where secret data exists). In particular at the level of this CAP, the design should guard against:
- - The risk of resource exhaustion, leading to denial of service by validators.
- - The risk of VM escape, leading to arbitrary Byzantine failures on validators, including data corruption or unauthorized transactions.
- - The risk of side channels, allowing VM code to extract validator private keys or other secret data on validators.
- - The risk of unintended contract behaviour due to invocation with malicious input data.
- - The risk of unintended contract behaviour due to calls to or from malicious contracts.
+1. Code: stellar-core's state transition function must be extended with some
+ means of executing, within parameters, some form of user-provided
+ Turing-complete instruction code. Preferably in a compact form that can be
+ stored within the ledger.
- 4. Well-defined: Soroban should not compromise the network's bit-precise consensus or historical replay functions, and should have a well-defined and unambiguous semantics for any code or data added by users. Where possible this should be maintained by reference to existing, well-defined standards. In particular at the level of this CAP, the design should guard against:
- - The risk of underspecified or nondeterministic VM code.
- - The risk of underspecified or nondeterministic datatypes.
+2. Data: stellar-core's model of data -- comprising transaction input, output,
+ persistent state and temporary working memory -- must be extended to include
+ data of concern to smart contracts: their input, output, persistent state,
+ and temporary working memory during execution. Any transformations between
+ each of these sorts of data must be specified, even if partially delegated
+ to contract logic.
- 5. Performant: Soroban should not compromise the performance of the network, and should perform competitively with other smart contract systems. Users should not be subject to a significant performance penalty for using smart contracts instead of built-in transactions. In particular at the level of this CAP, the design should guard against:
- - The risk of needing to load, compile, instantiate or run a large amount of VM code per transaction. Contracts should be small.
- - The risk of contending on shared mutable data that may defeat parallel execution of transactions. Contracts should be isolated.
- - The risk of requiring smart contract developers to do extensive optimization to achieve acceptable performance.
-
- 6. Interoperable: Soroban will necessarily introduce _some_ new user-defined semantics which are by definition unknown to _some_ users and 3rd parties. But beyond such _necessary_ risks, Soroban should avoid introducing _unnecessary_ hazards to interoperability, especially through choice of data encoding for input, output and persistent state. In particular at the level of this CAP, the design should guard against:
- - The risk of being unable to share data between different contracts, or different versions of the same contract.
- - The risk of being forced to write contracts in, or invoke contracts from, a single programming language.
- - The risk of having no tools or only immature tools for working with any programming language targeting the VM.
- - The risk of being unable to passively observe contract state for testing, debugging, diagnosis or monitoring.
- - The risk of 3rd parties being unable to exchange data with contracts.
-
- 7. Simple: Soroban should be as simple as possible while achieving other requirements. It should not require excessive innovation or expensive engineering by either developers or users of stellar-core. Smart contracts are late in coming to the Stellar Network, there is plenty of prior art to draw from, and there is a limited window of time to complete the work. At the level of this CAP, the design should guard against:
- - The risk of designing or implementing a novel VM, programming language, client library, or serialization format.
- - The risk of selecting an existing platform that is incompatible with or causes major changes to stellar-core.
- - The risk of delivering a system that is too challenging to learn for users or 3rd parties.
+### Required parameters to mitigate risks
+While the primary requirements seem simple enough to meet -- "just add a VM" --
+there are many risks associated with a naive implementation. Therefore
+subsequent requirements take the form of parameters that constrain
+implementations in order to mitigate risks, including:
+
+3. Secure: Soroban should be secure against benign or malicious smart contract
+ code as well as contract-code input that could imperil system availability,
+ integrity, or confidentiality (in the few cases where secret data exists).
+ In particular at the level of this CAP, the design should guard against:
+ - The risk of resource exhaustion, leading to denial of service by
+ validators.
+ - The risk of VM escape, leading to arbitrary Byzantine failures on
+ validators, including data corruption or unauthorized transactions.
+ - The risk of side channels, allowing VM code to extract validator private
+ keys or other secret data on validators.
+ - The risk of unintended contract behaviour due to invocation with malicious
+ input data.
+ - The risk of unintended contract behaviour due to calls to or from
+ malicious contracts.
+
+4. Well-defined: Soroban should not compromise the network's bit-precise
+ consensus or historical replay functions, and should have a well-defined and
+ unambiguous semantics for any code or data added by users. Where possible
+ this should be maintained by reference to existing, well-defined standards.
+ In particular at the level of this CAP, the design should guard against:
+ - The risk of underspecified or nondeterministic VM code.
+ - The risk of underspecified or nondeterministic datatypes.
+
+5. Performant: Soroban should not compromise the performance of the network,
+ and should perform competitively with other smart contract systems. Users
+ should not be subject to a significant performance penalty for using smart
+ contracts instead of built-in transactions. In particular at the level of
+ this CAP, the design should guard against:
+ - The risk of needing to load, compile, instantiate or run a large amount of
+ VM code per transaction. Contracts should be small.
+ - The risk of contending on shared mutable data that may defeat parallel
+ execution of transactions. Contracts should be isolated.
+ - The risk of requiring smart contract developers to do extensive
+ optimization to achieve acceptable performance.
+
+6. Interoperable: Soroban will necessarily introduce _some_ new user-defined
+ semantics which are by definition unknown to _some_ users and 3rd parties.
+ But beyond such _necessary_ risks, Soroban should avoid introducing
+ _unnecessary_ hazards to interoperability, especially through choice of data
+ encoding for input, output and persistent state. In particular at the level
+ of this CAP, the design should guard against:
+ - The risk of being unable to share data between different contracts, or
+ different versions of the same contract.
+ - The risk of being forced to write contracts in, or invoke contracts from,
+ a single programming language.
+ - The risk of having no tools or only immature tools for working with any
+ programming language targeting the VM.
+ - The risk of being unable to passively observe contract state for testing,
+ debugging, diagnosis or monitoring.
+ - The risk of 3rd parties being unable to exchange data with contracts.
+
+7. Simple: Soroban should be as simple as possible while achieving other
+ requirements. It should not require excessive innovation or expensive
+ engineering by either developers or users of stellar-core. Smart contracts
+ are late in coming to the Stellar Network, there is plenty of prior art to
+ draw from, and there is a limited window of time to complete the work. At
+ the level of this CAP, the design should guard against:
+ - The risk of designing or implementing a novel VM, programming language,
+ client library, or serialization format.
+ - The risk of selecting an existing platform that is incompatible with or
+ causes major changes to stellar-core.
+ - The risk of delivering a system that is too challenging to learn for users
+ or 3rd parties.
## Abstract
The specification consists of three parts:
- 1. A general description of the concepts of host and guest contexts, their relationships, constraints, and methods of implementation.
+1. A general description of the concepts of host and guest contexts, their
+ relationships, constraints, and methods of implementation.
- 2. A specification of the new components that provide the host and guest contexts, their means of interaction, and their lifecycle phases.
+2. A specification of the new components that provide the host and guest
+ contexts, their means of interaction, and their lifecycle phases.
- 3. A specification of the data model shared between host and guest.
+3. A specification of the data model shared between host and guest.
## Specification
@@ -89,124 +155,261 @@ The specification consists of three parts:
This CAP specifies aspects of two separate but related contexts:
- - The **host** context: this consists of portions of the existing C++ code making up stellar-core that can be accessed by smart contracts, as well as some new C++ and Rust code implied by this CAP. New C++ and Rust code includes the implementation of a WebAssembly (Wasm) virtual machine, a set of host objects, and a host environment that contains and manages the lifecycle and interaction of the host objects and virtual machines. The host environment, like the rest of stellar-core, is compiled to native code and runs with full access to its enclosing operating system environment, the ledger, the network, etc. The term "host environment" here corresponds to the term with that name in the WebAssembly specification.
-
- - The **guest** context: this consists of Wasm code _executed by_ a Wasm virtual machine embedded in the host environment. Guest code may originate in any programming language able to target Wasm, and will be provided by means unspecified in this CAP. Guest code has very limited access to its enclosing host environment: it can only consume CPU and memory resources to the extent that the host environment permits, and it can only call host functions that the host environment explicitly provides access to. The purpose of the guest context is to act as a so-called "sandbox" to attenuate potential harms caused by erroneous or malicious guest code, while allowing "just enough" programmability to satisfy the needs of users.
+- The **host** context: this consists of portions of the existing C++ code
+ making up stellar-core that can be accessed by smart contracts, as well as
+ some new C++ and Rust code implied by this CAP. New C++ and Rust code
+ includes the implementation of a WebAssembly (Wasm) virtual machine, a set of
+ host objects, and a host environment that contains and manages the lifecycle
+ and interaction of the host objects and virtual machines. The host
+ environment, like the rest of stellar-core, is compiled to native code and
+ runs with full access to its enclosing operating system environment, the
+ ledger, the network, etc. The term "host environment" here corresponds to the
+ term with that name in the WebAssembly specification.
+
+- The **guest** context: this consists of Wasm code _executed by_ a Wasm
+ virtual machine embedded in the host environment. Guest code may originate in
+ any programming language able to target Wasm, and will be provided by means
+ unspecified in this CAP. Guest code has very limited access to its enclosing
+ host environment: it can only consume CPU and memory resources to the extent
+ that the host environment permits, and it can only call host functions that
+ the host environment explicitly provides access to. The purpose of the guest
+ context is to act as a so-called "sandbox" to attenuate potential harms
+ caused by erroneous or malicious guest code, while allowing "just enough"
+ programmability to satisfy the needs of users.
### Components
-The guest and host contexts are provided by two new components added to stellar-core: a virtual machine and a host environment.
+The guest and host contexts are provided by two new components added to
+stellar-core: a virtual machine and a host environment.
#### Virtual Machine
-Code for a [WebAssembly 1.0](https://www.w3.org/TR/wasm-core-1/) **virtual machine** (VM) is embedded in stellar-core. The VM can be instantiated multiple times in the same stellar-core process, effectively supporting multiple separate guest contexts. The VM is configured with specific limits, and **excludes** support for any subsequent WebAssembly specification revisions or proposals.
+Code for a [WebAssembly 1.0](https://www.w3.org/TR/wasm-core-1/) **virtual
+machine** (VM) is embedded in stellar-core. The VM can be instantiated multiple
+times in the same stellar-core process, effectively supporting multiple
+separate guest contexts. The VM is configured with specific limits, and
+**excludes** support for any subsequent WebAssembly specification revisions or
+proposals.
-Furthermore to limit potential nondeterminism risks (see below), floating point instructions are prohibited and any Wasm code that includes floating point instructions will not proceed past validation, but be rejected with an error.
+Furthermore, to limit potential nondeterminism risks (see below), floating
+point instructions are prohibited and any Wasm code that includes floating
+point instructions will not proceed past validation, but be rejected with an
+error.
-Input guest code for a guest context is a single Wasm module in the specified Wasm binary format, and guest code will pass through all 4 semantic phases defined in the Wasm specification: decoding, validation, instantiation and execution. See the linked specification for details.
+Input guest code for a guest context is a single Wasm module in the specified
+Wasm binary format, and guest code will pass through all 4 semantic phases
+defined in the Wasm specification: decoding, validation, instantiation and
+execution. See the linked specification for details.
#### Host environment
-A new structure called a **host environment** is added to the transaction-processing subsystem of stellar-core. A host environment is a container carrying:
- - Zero or more Wasm VMs.
- - Any host objects that guest code in a Wasm VM can refer to.
- - Any resource-accounting mechanisms for guest code.
- - Any host functions that guest code in a Wasm VM can import.
- - A set of in-memory XDR values called "storage", representing a portion of the ledger.
-
-#### Interface
-
-The **interface** between the host environment and guest code is very narrow and is defined as a subset of the Wasm specification of "embedding". A summary of some relevant aspects is repeated here:
+A new structure called a **host environment** is added to the
+transaction-processing subsystem of stellar-core. A host environment is a
+container carrying:
- - Guest memory ("Wasm linear memory") is separated from host memory. The host may have a mechanism to access guest memory, but **the guest has no mechanism to access host memory**.
+- Zero or more Wasm VMs.
+- Any host objects that guest code in a Wasm VM can refer to.
+- Any resource-accounting mechanisms for guest code.
+- Any host functions that guest code in a Wasm VM can import.
+- A set of in-memory XDR values called "storage", representing a portion of the
+ ledger.
- - Wasm itself supports only 4 types of data value: `i32`, `i64`, `f32`, and `f64`. To further simplify the interface, we restrict it to support **exactly one type of data value**: `i64`. Everything Soroban passes back and forth between guest and host is encoded in one or more `i64` values. The the bits comprising such an `i64` may be interpreted in one of 3 ways depending on context: as a signed 64-bit 2s complement integer, as an unsigned 64-bit 2s complement integer, or as a polymorphic **value** type as described below in the "Data Model" section.
-
- - Guest code modules carry a list of **exported** functions (that the guest provides and the host can call) and a list of **imported** "host functions" (that the host provides and the guest can call). Both imported and exported functions can only pass a sequence of `i64` parameters and return a single `i64` value, or a trap. The set of host functions available for import is detailed in [CAP-0046-03 - Smart Contract Host Functions](./cap-0046-03.md).
+#### Interface
- - Various error conditions may result in a guest **trap** condition, which is a terminal state for the Wasm VM running the guest code: no further VM execution can occur after it traps. A trap may be generated by guest code due to an execution error, or may be generated by a host function called from guest code. Therefore any call from guest to host or host to guest may produce a trap result rather than a value.
+The **interface** between the host environment and guest code is very narrow
+and is defined as a subset of the Wasm specification of "embedding". A summary
+of some relevant aspects is repeated here:
+
+- Guest memory ("Wasm linear memory") is separated from host memory. The host
+ may have a mechanism to access guest memory, but **the guest has no mechanism
+ to access host memory**.
+
+- Wasm itself supports only 4 types of data value: `i32`, `i64`, `f32`, and
+  `f64`. To further simplify the interface, we restrict it to support **exactly
+  one type of data value**: `i64`. Everything Soroban passes back and forth
+  between guest and host is encoded in one or more `i64` values. The bits
+  comprising such an `i64` may be interpreted in one of 3 ways depending on
+  context: as a signed 64-bit 2's complement integer, as an unsigned 64-bit
+  2's complement integer, or as a polymorphic **value** type as described below
+  in the "Data Model" section.
+
+- Guest code modules carry a list of **exported** functions (that the guest
+ provides and the host can call) and a list of **imported** "host functions"
+ (that the host provides and the guest can call). Both imported and exported
+ functions can only pass a sequence of `i64` parameters and return a single
+ `i64` value, or a trap. The set of host functions available for import is
+ detailed in [CAP-0046-03 - Smart Contract Host Functions](./cap-0046-03.md).
+
+- Various error conditions may result in a guest **trap** condition, which is a
+ terminal state for the Wasm VM running the guest code: no further VM
+ execution can occur after it traps. A trap may be generated by guest code due
+ to an execution error, or may be generated by a host function called from
+ guest code. Therefore any call from guest to host or host to guest may
+ produce a trap result rather than a value.
#### Lifecycles
-A host environment has its own **lifecycle**: it is created before any of the host objects or VMs it contains, and destroyed after any of the host objects or VMs it contains.
+A host environment has its own **lifecycle**: it is created before any of the
+host objects or VMs it contains, and destroyed after any of the host objects or
+VMs it contains.
When a host environment is created, it contains no host objects and no VMs.
-Adding a Wasm VM to a host environment involves passing Wasm code through the 4 lifecycle phases in the Wasm specification: decoding, validation, instantiation and invocation. If any phase fails, no further phases will be performed on the failed Wasm VM.
+Adding a Wasm VM to a host environment involves passing Wasm code through the 4
+lifecycle phases in the Wasm specification: decoding, validation, instantiation
+and invocation. If any phase fails, no further phases will be performed on the
+failed Wasm VM.
-Multiple Wasm VMs can coexist in a single host environment. The intention is that one host environment and one Wasm VM will be created for an "outermost" invocation of a smart contract, and that "inner" contracts can be invoked by guest code calling a host function that constructs an additional VM and invokes a guest function in that new VM, within the same shared host environment. The specific mechanism of calling between contracts is not specified in this CAP.
+Multiple Wasm VMs can coexist in a single host environment. The intention is
+that one host environment and one Wasm VM will be created for an "outermost"
+invocation of a smart contract, and that "inner" contracts can be invoked by
+guest code calling a host function that constructs an additional VM and invokes
+a guest function in that new VM, within the same shared host environment. The
+specific mechanism of calling between contracts is not specified in this CAP.
-Multiple Wasm VMs in the same host environment can refer to the same host objects: this is the mechanism for passing (immutable) information between different smart contracts.
+Multiple Wasm VMs in the same host environment can refer to the same host
+objects: this is the mechanism for passing (immutable) information between
+different smart contracts.
#### Storage
-The host environment's **storage** is initialized with some set of XDR objects loaded from the ledger. The set of XDR objects to load is statically declared by the transaction that causes instantiation of the host environment. After execution, when a host environment is being finalized, the modified portion of the host's storage is written back to the ledger. Between initialization and finalization, storage exists only in the host environment's memory. For more details on the semantics of storage see [CAP 0046-05 Smart Contract Data](./cap-0046-05.md).
+The host environment's **storage** is initialized with some set of XDR objects
+loaded from the ledger. The set of XDR objects to load is statically declared
+by the transaction that causes instantiation of the host environment. After
+execution, when a host environment is being finalized, the modified portion of
+the host's storage is written back to the ledger. Between initialization and
+finalization, storage exists only in the host environment's memory. For more
+details on the semantics of storage see
+[CAP 0046-05 Smart Contract Data](./cap-0046-05.md).
#### Limits
-The host maintains a per-transaction budget of CPU and memory resources, and as resources are consumed both by host functions and by the Wasm VM execution steps, the budget is reduced until it is exhausted. If the budget is exhausted before transaction completion, the host will trap with an error.
+The host maintains a per-transaction budget of CPU and memory resources, and as
+resources are consumed both by host functions and by the Wasm VM execution
+steps, the budget is reduced until it is exhausted. If the budget is exhausted
+before transaction completion, the host will trap with an error.
-An important aspect of resource limiting is that it is performed against a _deterministic model_ of the computational budget -- with "model" costs incrementally deducted from the budget model by explicit calls placed throughout the host function and VM code -- rather than by measuring real computational resources (time or memory) consumed during execution. This is necessary to maintain deterministic execution: any resource exhaustion that might occur must occur exactly the same way, at exactly the same instant, on every node in the Stellar network processing a Soroban transaction.
+An important aspect of resource limiting is that it is performed against a
+_deterministic model_ of the computational budget -- with "model" costs
+incrementally deducted from the budget model by explicit calls placed
+throughout the host function and VM code -- rather than by measuring real
+computational resources (time or memory) consumed during execution. This is
+necessary to maintain deterministic execution: any resource exhaustion that
+might occur must occur exactly the same way, at exactly the same instant, on
+every node in the Stellar network processing a Soroban transaction.
-The detailed structure and logic for the budget is given in [CAP-0046-10 - Smart Contract Budget Metering](./cap-0046-10.md)
+The detailed structure and logic for the budget is given in
+[CAP-0046-10 - Smart Contract Budget Metering](./cap-0046-10.md)
#### Determinism
-Both guest code and any part of the host environment controlled by guest code must execute deterministically in response to inputs, and must be sufficiently well-specified that replaying historical guest code in an upgraded host environment (i.e. a new version of stellar-core) will produce observably-identical results. This includes the result of observable resource exhaustion within host-controlled CPU or memory limits, which implies the need for careful resource accounting on all guest-controlled actions.
-
-The Wasm spec has [carefully limited nondeterminism](https://github.com/WebAssembly/design/blob/main/Nondeterminism.md) to a small set of cases, which we consider here:
- - New features: only minor, fully deterministic Wasm features beyond the 1.0 spec are supported by Soroban. Specifically the `sign-ext` and `mutable-globals` extensions, which are commonly included as target features in high level language compilers (eg. both Rust and C/C++ compilers).
- - Threads: not supported by Soroban.
- - NaN-related behaviour for floating point: all floating point code is prohibited.
- - SIMD-related behaviour: all SIMD extensions are prohibited.
- - Environment-resource limit exhaustion: enforced through a deterministic budget model as discussed above.
-
+Both guest code and any part of the host environment controlled by guest code
+must execute deterministically in response to inputs, and must be sufficiently
+well-specified that replaying historical guest code in an upgraded host
+environment (i.e. a new version of stellar-core) will produce
+observably-identical results. This includes the result of observable resource
+exhaustion within host-controlled CPU or memory limits, which implies the need
+for careful resource accounting on all guest-controlled actions.
+
+The Wasm spec has
+[carefully limited nondeterminism](https://github.com/WebAssembly/design/blob/main/Nondeterminism.md)
+to a small set of cases, which we consider here:
+
+- New features: only minor, fully deterministic Wasm features beyond the 1.0
+ spec are supported by Soroban. Specifically the `sign-ext` and
+ `mutable-globals` extensions, which are commonly included as target features
+  in high-level language compilers (e.g. both Rust and C/C++ compilers).
+- Threads: not supported by Soroban.
+- NaN-related behaviour for floating point: all floating point code is
+ prohibited.
+- SIMD-related behaviour: all SIMD extensions are prohibited.
+- Environment-resource limit exhaustion: enforced through a deterministic
+ budget model as discussed above.
### Data Model
-This CAP defines a **data model** shared between guest and host environments. It consists of a set of _values_ and a set of _objects_:
+This CAP defines a **data model** shared between guest and host environments.
+It consists of a set of _values_ and a set of _objects_:
- - **Values** can be packed into a 64-bit integer, and can therefore be easily passed back and forth between the host environment and guest code, as arguments or return values from imported or exported functions.
- - **Objects** (also called "host objects") exist only in host memory, in the host context, and can only be _referenced_ by guest code through values containing **handles** that _refer to_ objects. If guest code wishes to perform an operation on a host object, it must call a host function with values containing handles that _refer to_ any host object(s) to operate on.
+- **Values** can be packed into a 64-bit integer, and can therefore be easily
+ passed back and forth between the host environment and guest code, as
+ arguments or return values from imported or exported functions.
+- **Objects** (also called "host objects") exist only in host memory, in the
+ host context, and can only be _referenced_ by guest code through values
+ containing **handles** that _refer to_ objects. If guest code wishes to
+ perform an operation on a host object, it must call a host function with
+ values containing handles that _refer to_ any host object(s) to operate on.
#### Immutability
-Host objects are **immutable**: they cannot be changed once created. Any operation on a host object that implies a modification of the object's state will allocate a new object (with a new handle) containing the modified state, and return a value that refers to the new object by its new handle. Objects must therefore be relatively small. Objects are _not_ necessarily unique; two objects may be equal (in the sense of containing the same data) but have different handles.
+Host objects are **immutable**: they cannot be changed once created. Any
+operation on a host object that implies a modification of the object's state
+will allocate a new object (with a new handle) containing the modified state,
+and return a value that refers to the new object by its new handle. Objects
+must therefore be relatively small. Objects are _not_ necessarily unique; two
+objects may be equal (in the sense of containing the same data) but have
+different handles.
-Values may also be considered "immutable" in some sense, but since they are typically machine primitives and any two equal values are indistinguishable, mutability or immutability is not a particularly meaningful concept for values.
+Values may also be considered "immutable" in some sense, but since they are
+typically machine primitives and any two equal values are indistinguishable,
+mutability or immutability is not a particularly meaningful concept for values.
#### Forms
The data model is specified in two separate **forms**:
- - In XDR, for inclusion in serial forms such as transactions and ledger entries.
- - In a set of "host types", of which the "host _value_ type" is shared between host and guest.
+- In XDR, for inclusion in serial forms such as transactions and ledger
+ entries.
+- In a set of "host types", of which the "host _value_ type" is shared between
+ host and guest.
-The rationale for the two separate forms is given below, in the rationale section.
+The rationale for the two separate forms is given below, in the rationale
+section.
### XDR changes
-See the new XDR files in [CAP-0046 - Soroban overview](./cap-0046.md) for a complete listing.
+See the new XDR files in [CAP-0046 - Soroban overview](./cap-0046.md) for a
+complete listing.
One XDR union type, and its variants, are worth discussing in this CAP: SCVal.
#### SCVal
-`SCVal` is a new XDR type. Its name is short for **smart contract ("SC") value**. It is a _general, polymorphic type_ in the sense that it is a union with many possible cases: numbers, strings, booleans, maps, vectors, error codes, and several special cases. It exists because many subsystems of the smart contract system, as well as many smart contracts themselves, must often act on values of interest to contracts without knowing their specific types ahead of time.
-
-For example, the smart contract transaction invocation path must pass user-provided values to a contract and return values from a contract, and must do so generically without knowledge of the types of those values, so it accepts and returns `SCVal`s. Similarly the smart contract storage system allows loading and storing `SCVal`s in the ledger. And within a contract's own code, often some logic wishes to deal with values without knowing their precise type, such as forwarding values from one contract to another or extracting them from containers.
-
-`SCVal` is keyed by the enum `SCValType` which has 22 variants. They are described in comments in `Stellar-contract.x`.
+`SCVal` is a new XDR type. Its name is short for **smart contract ("SC")
+value**. It is a _general, polymorphic type_ in the sense that it is a union
+with many possible cases: numbers, strings, booleans, maps, vectors, error
+codes, and several special cases. It exists because many subsystems of the
+smart contract system, as well as many smart contracts themselves, must often
+act on values of interest to contracts without knowing their specific types
+ahead of time.
+
+For example, the smart contract transaction invocation path must pass
+user-provided values to a contract and return values from a contract, and must
+do so generically without knowledge of the types of those values, so it accepts
+and returns `SCVal`s. Similarly the smart contract storage system allows
+loading and storing `SCVal`s in the ledger. And within a contract's own code,
+often some logic wishes to deal with values without knowing their precise type,
+such as forwarding values from one contract to another or extracting them from
+containers.
+
+`SCVal` is keyed by the enum `SCValType` which has 22 variants. They are
+described in comments in `Stellar-contract.x`.
#### Host value type
-The **host value type** -- in the Rust host and SDK code this is simply called `Val` -- is a 64-bit integer carrying a bit-packed disjoint union of several cases, each identified by a different `Tag` value.
+The **host value type** -- in the Rust host and SDK code this is simply called
+`Val` -- is a 64-bit integer carrying a bit-packed disjoint union of several
+cases, each identified by a different `Tag` value.
##### Bit-packed representation
-The low 8 bits of a `Val` are referred to as the **tag** and the remaining high 56 bits are referred to as the **body**. The tag's value determines the interpretation of the body. In some cases the body is itself further subdivided into 24 low bits, called the body's **minor component**, and 32 high bits, called the body's **major component**.
+The low 8 bits of a `Val` are referred to as the **tag** and the remaining high
+56 bits are referred to as the **body**. The tag's value determines the
+interpretation of the body. In some cases the body is itself further subdivided
+into 24 low bits, called the body's **minor component**, and 32 high bits,
+called the body's **major component**.
-In other words, a value schematically looks like one of the following two cases:
+In other words, a value schematically looks like one of the following two
+cases:
```
bit 64 56 48 40 32 24 16 8 0
@@ -221,336 +424,792 @@ bit 64 56 48 40 32 24 16 8 0
+-------+-------+-------+-------+-------+-------+-------+-------+
```
-When accessing the body, the bit pattern may be considered as either a signed or unsigned 64-bit value. If signed, the body is extracted by a signed (arithmetic) right shift, properly sign-extending from 56 to 64 bits any negative values stored in the body. Similarly the major component may be treated as a signed or unsigned 32-bit integer. The minor component is only ever treated as an unsigned 32-bit integer, and is zero-extended from 24 to 32 bits on access.
+When accessing the body, the bit pattern may be considered as either a signed
+or unsigned 64-bit value. If signed, the body is extracted by a signed
+(arithmetic) right shift, properly sign-extending from 56 to 64 bits any
+negative values stored in the body. Similarly the major component may be
+treated as a signed or unsigned 32-bit integer. The minor component is only
+ever treated as an unsigned 32-bit integer, and is zero-extended from 24 to 32
+bits on access.
##### Tag values
-The different cases of the XDR value type `SCVal` are differentiated by the XDR enum `SCValType`, which is subsequently encoded as `Tag`s in a `Val`, though the mapping is 1:N rather than 1:1. Specifically, for each 1 `SCVal` case (i.e. `SCValType` code) at the XDR level, there may be N (usually 1 or 2) different _refinements_ of that type as a specialized `Tag` case in the host value type, usually to enable a more compact representation when small special cases of `SCVal` are projected into host values.
+The different cases of the XDR value type `SCVal` are differentiated by the XDR
+enum `SCValType`, which is subsequently encoded as `Tag`s in a `Val`, though
+the mapping is 1:N rather than 1:1. Specifically, for each 1 `SCVal` case (i.e.
+`SCValType` code) at the XDR level, there may be N (usually 1 or 2) different
+_refinements_ of that type as a specialized `Tag` case in the host value type,
+usually to enable a more compact representation when small special cases of
+`SCVal` are projected into host values.
`Tag` values are organized in two contiguous blocks:
- - A low-valued block (initially between values 0 and 15 inclusive) that covers "small" `Val`s, where the entire semantic content of the `Val` is contained in its body.
- - A high-valued block (initially between values 64 and 77 inclusive) that covers "object handle" values, where the body of the `Val` just carries an object handle in its "major" component.
-
-The two blocks are kept separate to enable an efficient single-comparison `Tag` test for all object handle values. The split between blocks happens at tag value 64 rather than 128 (as might be expected given the 8 bit range of `Tag`) so that all initially assigned tags are less than 127, which is the maximum size of a single Wasm ULEB128 code unit (another minor space optimization). We anticipate the system will grow to support some additional tags in the future, but believe the available tag space will be sufficient to accommodate such growth.
+- A low-valued block (initially between values 0 and 15 inclusive) that covers
+ "small" `Val`s, where the entire semantic content of the `Val` is contained
+ in its body.
+- A high-valued block (initially between values 64 and 77 inclusive) that
+ covers "object handle" values, where the body of the `Val` just carries an
+ object handle in its "major" component.
+
+The two blocks are kept separate to enable an efficient single-comparison `Tag`
+test for all object handle values. The split between blocks happens at tag
+value 64 rather than 128 (as might be expected given the 8 bit range of `Tag`)
+so that all initially assigned tags are less than 127, which is the maximum
+size of a single Wasm ULEB128 code unit (another minor space optimization). We
+anticipate the system will grow to support some additional tags in the future,
+but believe the available tag space will be sufficient to accommodate such
+growth.
The specific `Tag` values are:
- - `Tag::False = 0`, a refinement of the `SCVal` case for `SCV_BOOL` encoding just boolean false. The body is zero.
- - `Tag::True = 1`, a refinement of the `SCVal` case for `SCV_BOOL` encoding just boolean true. The body is zero.
- - `Tag::Void = 2`, corresponding to the `SCVal` case for `SCV_VOID`. The body is zero.
- - `Tag::Error = 3`, corresponding to the `SCVal` case for `SCV_ERROR`. The body takes the major/minor form:
- - The minor component is an "error type", one of the values of the XDR enumeration `SCErrorType`.
- - The major component is an "error code":
- - If the "error type" is `SCE_CONTRACT`, the major component is the `uint32` error code in the `SCE_CONTRACT` case of `SCError`, a contract-defined error code with no specific meaning to the runtime.
- - Otherwise the major component is the `SCErrorCode` value of the corresponding `SCE_*` case of `SCError`.
- - `Tag::U32Val = 4`, corresponding to the `SCVal` case for `SCV_U32`. The major component carries an unsigned 32-bit integer.
- - `Tag::I32Val = 5`, corresponding to the `SCVal` case for `SCV_I32`. The major component carries a signed 32-bit integer.
- - `Tag::U64Small = 6`, a refinement of the `SCVal` case for `SCV_U64` for unsigned 64-bit integer values that are small enough to fit in the 56 bits of the `Val`'s body without data loss. Specifically those values in the range from `0` to `0x00ff_ffff_ffff_ffff` inclusive.
- - `Tag::I64Small = 7`, a refinement of the `SCVal` case for `SCV_I64` for signed 64-bit integer values that are small enough to fit in the 56 bits of the `Val`'s body without data loss. Specifically those `int64` values in the range from `-36_028_797_018_963_968` to `36_028_797_018_963_967` inclusive.
- - `Tag::TimepointSmall = 8`, the same as `U64Small` but for the `SCVal` case for `SCV_TIMEPOINT`.
- - `Tag::DurationSmall = 9`, the same as `U64Small` but for the `SCVal` case for `SCV_DURATION`.
- - `Tag::U128Small = 10`, the same as `U64Small` but for the `SCVal` case for `SCV_U128`.
- - `Tag::I128Small = 11`, the same as `I64Small` but for the `SCVal` case for `SCV_I128`.
- - `Tag::U256Small = 12`, the same as `U64Small` but for the `SCVal` case for `SCV_U256`.
- - `Tag::I256Small = 13`, the same as `I64Small` but for the `SCVal` case for `SCV_I256`.
- - `Tag::SymbolSmall = 14`, a refinement of the `SCVal` case for `SCV_SYMBOL` for small symbols up to 9 characters long. The body of the `Val` contains between 0 and 9 characters, with each character encoded as a 6-bit, 1-based code that indexes into the 63-character repertoire allowed by the general `SCV_SYMBOL` type: `[_0-9-A-Za-z]`. That is, the character `_` is coded by the six bits `0b00_0001`, the character `0` is coded by the six bits `0b00_0010`, and so on, with the final allowed character `z` coded by the six bits `0b11_1111`. Then these 6-bit codes are packed into the 56 bit body such that the lowest 6 bits of the body always code for the last character in the symbol, and if the symbol is less than 9 characters long then the body's _high bits_ are padded with all-zero 6-bit codes (this representation optimizes for encoding in Wasm's ULEB128 format).
- - `Tag::LedgerKeyContractInstance = 15`, a refinement of the `SCVal` case for `SCV_LEDGER_KEY_CONTRACT_INSTANCE`, a special value reserved for use as a key identifying contract instances in the storage system. The body is zero.
- - `Tag::U64Object = 64`, for object-handle `Val`s referring to the `SCVal` case for `SCV_U64`, typically only used when the `uint64` is larger than 56 bits and so cannot fit in a `U64Small`, though small integers stored in `U64Object` are legal. The body's major component is a 32-bit object handle, referring to a host object. The minor component is zero.
- - `Tag::I64Object = 65`, the same as `U64Object` but for the `SCVal` case for `SCV_I64`.
- - `Tag::TimepointObject = 66`, the same as `U64Object` but for the `SCVal` case for `SCV_TIMEPOINT`.
- - `Tag::DurationObject = 67`, the same as `U64Object` but for the `SCVal` case for `SCV_DURATION`.
- - `Tag::U128Object = 68`, the same as `U64Object` but for the `SCVal` case for `SCV_U128`.
- - `Tag::I128Object = 69`, the same as `U64Object` but for the `SCVal` case for `SCV_I128`.
- - `Tag::U256Object = 70`, the same as `U64Object` but for the `SCVal` case for `SCV_U256`.
- - `Tag::I256Object = 71`, the same as `U64Object` but for the `SCVal` case for `SCV_I256`.
- - `Tag::BytesObject = 72`, for object-handle `Val`s referring to the `ScVal` case for `SCV_BYTES`.
- - `Tag::StringObject = 73`, for object-handle `Val`s referring to the `ScVal` case for `SCV_STRING`.
- - `Tag::SymbolObject = 74`, for object-handle `Val`s referring to the `ScVal` case for `SCV_SYMBOL`, typically only used when the symbol is longer than 9 characters, so cannot fit in a `SymbolSmall`.
- - `Tag::VecObject = 75`, for object-handle `Val`s referring to the `ScVal` case for `SCV_VEC`.
- - `Tag::MapObject = 76`, for object-handle `Val`s referring to the `ScVal` case for `SCV_MAP`.
- - `Tag::AddressObject = 77`, for object-handle `Val`s referring to the `ScVal` case for `SCV_ADDRESS`.
-
-The Rust code defining the `Tag` datatype includes some additional symbolic names for the boundaries of the assigned tag codes, as well as a sentinel for unassigned tags, but these are not part of the interface specified by this CAP. All tag values not described above are reserved for future use.
+- `Tag::False = 0`, a refinement of the `SCVal` case for `SCV_BOOL` encoding
+ just boolean false. The body is zero.
+- `Tag::True = 1`, a refinement of the `SCVal` case for `SCV_BOOL` encoding
+ just boolean true. The body is zero.
+- `Tag::Void = 2`, corresponding to the `SCVal` case for `SCV_VOID`. The body
+ is zero.
+- `Tag::Error = 3`, corresponding to the `SCVal` case for `SCV_ERROR`. The body
+ takes the major/minor form:
+ - The minor component is an "error type", one of the values of the XDR
+ enumeration `SCErrorType`.
+ - The major component is an "error code":
+ - If the "error type" is `SCE_CONTRACT`, the major component is the
+ `uint32` error code in the `SCE_CONTRACT` case of `SCError`, a
+ contract-defined error code with no specific meaning to the runtime.
+ - Otherwise the major component is the `SCErrorCode` value of the
+ corresponding `SCE_*` case of `SCError`.
+- `Tag::U32Val = 4`, corresponding to the `SCVal` case for `SCV_U32`. The major
+ component carries an unsigned 32-bit integer.
+- `Tag::I32Val = 5`, corresponding to the `SCVal` case for `SCV_I32`. The major
+ component carries a signed 32-bit integer.
+- `Tag::U64Small = 6`, a refinement of the `SCVal` case for `SCV_U64` for
+ unsigned 64-bit integer values that are small enough to fit in the 56 bits of
+ the `Val`'s body without data loss. Specifically those values in the range
+ from `0` to `0x00ff_ffff_ffff_ffff` inclusive.
+- `Tag::I64Small = 7`, a refinement of the `SCVal` case for `SCV_I64` for
+ signed 64-bit integer values that are small enough to fit in the 56 bits of
+ the `Val`'s body without data loss. Specifically those `int64` values in the
+ range from `-36_028_797_018_963_968` to `36_028_797_018_963_967` inclusive.
+- `Tag::TimepointSmall = 8`, the same as `U64Small` but for the `SCVal` case
+ for `SCV_TIMEPOINT`.
+- `Tag::DurationSmall = 9`, the same as `U64Small` but for the `SCVal` case for
+ `SCV_DURATION`.
+- `Tag::U128Small = 10`, the same as `U64Small` but for the `SCVal` case for
+ `SCV_U128`.
+- `Tag::I128Small = 11`, the same as `I64Small` but for the `SCVal` case for
+ `SCV_I128`.
+- `Tag::U256Small = 12`, the same as `U64Small` but for the `SCVal` case for
+ `SCV_U256`.
+- `Tag::I256Small = 13`, the same as `I64Small` but for the `SCVal` case for
+ `SCV_I256`.
+- `Tag::SymbolSmall = 14`, a refinement of the `SCVal` case for `SCV_SYMBOL`
+ for small symbols up to 9 characters long. The body of the `Val` contains
+ between 0 and 9 characters, with each character encoded as a 6-bit, 1-based
+ code that indexes into the 63-character repertoire allowed by the general
+  `SCV_SYMBOL` type: `[_0-9A-Za-z]`. That is, the character `_` is coded by
+ the six bits `0b00_0001`, the character `0` is coded by the six bits
+ `0b00_0010`, and so on, with the final allowed character `z` coded by the six
+ bits `0b11_1111`. Then these 6-bit codes are packed into the 56 bit body such
+ that the lowest 6 bits of the body always code for the last character in the
+ symbol, and if the symbol is less than 9 characters long then the body's
+ _high bits_ are padded with all-zero 6-bit codes (this representation
+ optimizes for encoding in Wasm's ULEB128 format).
+- `Tag::LedgerKeyContractInstance = 15`, a refinement of the `SCVal` case for
+ `SCV_LEDGER_KEY_CONTRACT_INSTANCE`, a special value reserved for use as a key
+ identifying contract instances in the storage system. The body is zero.
+- `Tag::U64Object = 64`, for object-handle `Val`s referring to the `SCVal` case
+ for `SCV_U64`, typically only used when the `uint64` is larger than 56 bits
+ and so cannot fit in a `U64Small`, though small integers stored in
+ `U64Object` are legal. The body's major component is a 32-bit object handle,
+ referring to a host object. The minor component is zero.
+- `Tag::I64Object = 65`, the same as `U64Object` but for the `SCVal` case for
+ `SCV_I64`.
+- `Tag::TimepointObject = 66`, the same as `U64Object` but for the `SCVal` case
+ for `SCV_TIMEPOINT`.
+- `Tag::DurationObject = 67`, the same as `U64Object` but for the `SCVal` case
+ for `SCV_DURATION`.
+- `Tag::U128Object = 68`, the same as `U64Object` but for the `SCVal` case for
+ `SCV_U128`.
+- `Tag::I128Object = 69`, the same as `U64Object` but for the `SCVal` case for
+ `SCV_I128`.
+- `Tag::U256Object = 70`, the same as `U64Object` but for the `SCVal` case for
+ `SCV_U256`.
+- `Tag::I256Object = 71`, the same as `U64Object` but for the `SCVal` case for
+ `SCV_I256`.
+- `Tag::BytesObject = 72`, for object-handle `Val`s referring to the `ScVal`
+ case for `SCV_BYTES`.
+- `Tag::StringObject = 73`, for object-handle `Val`s referring to the `ScVal`
+ case for `SCV_STRING`.
+- `Tag::SymbolObject = 74`, for object-handle `Val`s referring to the `ScVal`
+ case for `SCV_SYMBOL`, typically only used when the symbol is longer than 9
+ characters, so cannot fit in a `SymbolSmall`.
+- `Tag::VecObject = 75`, for object-handle `Val`s referring to the `ScVal` case
+ for `SCV_VEC`.
+- `Tag::MapObject = 76`, for object-handle `Val`s referring to the `ScVal` case
+ for `SCV_MAP`.
+- `Tag::AddressObject = 77`, for object-handle `Val`s referring to the `ScVal`
+ case for `SCV_ADDRESS`.
+
+The Rust code defining the `Tag` datatype includes some additional symbolic
+names for the boundaries of the assigned tag codes, as well as a sentinel for
+unassigned tags, but these are not part of the interface specified by this CAP.
+All tag values not described above are reserved for future use.
#### Host object type(s)
-There are many different **host object types**, and we refer to the disjoint union of all possible host object types as **the host object type**. This may be implemented in terms of a variant type, an object hierarchy, or any other similar mechanism in the host.
-
-Every host object is held in host memory and **cannot be accessed directly from guest code**. Host objects can be _referred to_ by host values in either host or guest code: specifically those values with tags between `64` and `77` inclusive refer to host objects by handle.
-
-**Host object handles** are integers that identify host objects. They come in two forms: **relative** handles and **absolute** handles. Relative handles are, as their name suggests, only meaningful _relative_ to a specific Wasm VM: they are indexes into an indirection table attached to each Wasm VM that maps relative handles to absolute handles. Absolute handles identify host objects within the host independently of any Wasm VM. When guest code running in a Wasm VM has a value of some object-handle type, it is always a _relative_ handle. When guest code calls the host, any relative handle being passed is translated to an absolute handle, and when an absolute handle is returned from the host to the guest it is translated from an absolute to a relative handle. This way guests never see absolute handles, and cannot access any host objects that they have not explicitly been passed references to (eg. as invocation arguments or return values from host functions).
-
-If a host object is accessed through an invalid handle -- a number that does not identify an object -- the access fails with an error.
-
-If a host object is accessed through a value with a tag that does not match the actual type of the underlying host object, the access fails with an error. While not strictly necessary -- it would be possible to simply ignore the tag -- this helps catch coding errors. Similarly if a host function expects a host object handle argument with a specific tag, and is passed a value with a different tag, it is rejected with an error even if the object handle number is valid.
-
-The specific operations that can be performed on each host object are defined by host functions, described in [CAP-0046-03 - Smart Contract Host Functions](./cap-0046-03.md).
+There are many different **host object types**, and we refer to the disjoint
+union of all possible host object types as **the host object type**. This may
+be implemented in terms of a variant type, an object hierarchy, or any other
+similar mechanism in the host.
+
+Every host object is held in host memory and **cannot be accessed directly from
+guest code**. Host objects can be _referred to_ by host values in either host
+or guest code: specifically those values with tags between `64` and `77`
+inclusive refer to host objects by handle.
+
+**Host object handles** are integers that identify host objects. They come in
+two forms: **relative** handles and **absolute** handles. Relative handles are,
+as their name suggests, only meaningful _relative_ to a specific Wasm VM: they
+are indexes into an indirection table attached to each Wasm VM that maps
+relative handles to absolute handles. Absolute handles identify host objects
+within the host independently of any Wasm VM. When guest code running in a Wasm
+VM has a value of some object-handle type, it is always a _relative_ handle.
+When guest code calls the host, any relative handle being passed is translated
+to an absolute handle, and when an absolute handle is returned from the host to
+the guest it is translated from an absolute to a relative handle. This way
+guests never see absolute handles, and cannot access any host objects that they
+have not explicitly been passed references to (eg. as invocation arguments or
+return values from host functions).
+
+If a host object is accessed through an invalid handle -- a number that does
+not identify an object -- the access fails with an error.
+
+If a host object is accessed through a value with a tag that does not match the
+actual type of the underlying host object, the access fails with an error.
+While not strictly necessary -- it would be possible to simply ignore the tag
+-- this helps catch coding errors. Similarly if a host function expects a host
+object handle argument with a specific tag, and is passed a value with a
+different tag, it is rejected with an error even if the object handle number is
+valid.
+
+The specific operations that can be performed on each host object are defined
+by host functions, described in
+[CAP-0046-03 - Smart Contract Host Functions](./cap-0046-03.md).
#### Comparison
-Values and objects in the data model have a total order. When comparing two values A and B:
-
- - If both values have an equal bit-pattern, their order is equal.
- - If _either_ value is an object-handle type, they are compared through object comparison (via the host function `obj_cmp`) as described below.
- - Otherwise A and B are both small-value types:
- - If A's `Tag` differs from B's `Tag`, they are ordered by numeric `Tag` value (which, for small values, match the order of the corresponding XDR `SCValType`s).
- - Otherwise A and B have the same `Tag` value:
- - If A and B have common tag `Tag::False`, `Tag::True`, `Tag::Void`, or `Tag::LedgerKeyContractInstance`, A and B are equal.
- - If A and B have common tag `Tag::Error`, A and B are ordered first by their minor components (the "error type"), then by their major components (the "error code"), both treated as unsigned 32-bit integers.
- - If A and B have common tag `Tag::U32Val`, A and B are ordered by their major components, treated as unsigned 32-bit integers.
- - If A and B have common tag `Tag::I32Val`, A and B are ordered by their major components, treated as signed 32-bit integers.
- - If A and B have common tag `Tag::U64Small`, `Tag::U128Small` or `Tag::U256Small`, A and B are ordered by their bodies, treated as unsigned 64-bit integers.
- - If A and B have common tag `Tag::I64Small`, `Tag::I128Small` or `Tag::I256Small`, A and B are ordered by their bodies, treated as signed 64-bit integers.
-
-Object comparison can be accessed by either guest or host: it is provided to guests as a host function `obj_cmp` via the host environment interface. It performs a recursive structural comparison of objects, as well as values embedded in objects, using the following rules:
-
- - If A and B have the same `Tag` value, they are directly compared as objects:
- - If A and B have common tag `Tag::VecObject`, they are ordered by lexicographic extension of the value order.
- - If A and B have common tag `Tag::MapObject` objects, they are ordered lexicographically as ordered vectors of (key, value) pairs.
- - If A and B have common tag `Tag::U64Object`, `Tag::I64Object`, `Tag::U128Object`, `Tag::I128Object`, `Tag::U256Object` or `Tag::I256Object`, they are ordered using the numerical order for those types.
- - If A and B have common tag `Tag::BytesObject`, `Tag::StringObject`, `Tag::SymbolObject`, or `Tag::Address` they are ordered (recursively) in the natural order of their corresponding XDR representations: lexicographically by structure field order, sequence order, union discriminant and structure field numerical orders.
- - Otherwise only one of A or B are object handles:
- - If either has tag `Tag::U64Small` and the other has tag `Tag::U64Object`, both are compared as their underlying unsigned 64-bit integers.
- - Similarly when comparing a combination of tags `Tag::I64Small` and `Tag::I64Object`, or `Tag::TimepointSmall` and `Tag::TimePoint`, or `Tag::DurationSmall` and `Tag::Duration`, or `Tag::U128Small` and `Tag::U128Object`, or `Tag::I128Small` and `Tag::I128Object`, or `Tag::U256Small` and `Tag::U256Object`, or `Tag::I256Small` and `Tag::I256Object`, a small-value case and large-value case of the same underlying numeric type are compared in terms of that underlying numeric type.
- - Similarly if either has tag `Tag::SymbolSmall` and the other has tag `Tag::SymbolObject`, both are compared lexicographically as the underlying sequence of characters in each symbol.
- - Otherwise some object type and an unrelated non-object type are being compared, so their actual values are ignored and they are compared by the numerical value of the `SCValType` of the un-refined XDR `SCVal` type they represent (i.e. both `Tag::I64Small` and `Tag::I64Object` are projected to their `SCValType` `SCV_I64` for numerical code-comparison with the `SCValType` of the other value).
+Values and objects in the data model have a total order. When comparing two
+values A and B:
+
+- If both values have an equal bit-pattern, their order is equal.
+- If _either_ value is an object-handle type, they are compared through object
+ comparison (via the host function `obj_cmp`) as described below.
+- Otherwise A and B are both small-value types:
+ - If A's `Tag` differs from B's `Tag`, they are ordered by numeric `Tag`
+ value (which, for small values, match the order of the corresponding XDR
+ `SCValType`s).
+ - Otherwise A and B have the same `Tag` value:
+ - If A and B have common tag `Tag::False`, `Tag::True`, `Tag::Void`, or
+ `Tag::LedgerKeyContractInstance`, A and B are equal.
+ - If A and B have common tag `Tag::Error`, A and B are ordered first by
+ their minor components (the "error type"), then by their major components
+ (the "error code"), both treated as unsigned 32-bit integers.
+ - If A and B have common tag `Tag::U32Val`, A and B are ordered by their
+ major components, treated as unsigned 32-bit integers.
+ - If A and B have common tag `Tag::I32Val`, A and B are ordered by their
+ major components, treated as signed 32-bit integers.
+ - If A and B have common tag `Tag::U64Small`, `Tag::U128Small` or
+ `Tag::U256Small`, A and B are ordered by their bodies, treated as
+ unsigned 64-bit integers.
+ - If A and B have common tag `Tag::I64Small`, `Tag::I128Small` or
+ `Tag::I256Small`, A and B are ordered by their bodies, treated as signed
+ 64-bit integers.
+
+Object comparison can be accessed by either guest or host: it is provided to
+guests as a host function `obj_cmp` via the host environment interface. It
+performs a recursive structural comparison of objects, as well as values
+embedded in objects, using the following rules:
+
+- If A and B have the same `Tag` value, they are directly compared as objects:
+ - If A and B have common tag `Tag::VecObject`, they are ordered by
+ lexicographic extension of the value order.
+  - If A and B have common tag `Tag::MapObject`, they are ordered
+ lexicographically as ordered vectors of (key, value) pairs.
+ - If A and B have common tag `Tag::U64Object`, `Tag::I64Object`,
+ `Tag::U128Object`, `Tag::I128Object`, `Tag::U256Object` or
+ `Tag::I256Object`, they are ordered using the numerical order for those
+ types.
+ - If A and B have common tag `Tag::BytesObject`, `Tag::StringObject`,
+    `Tag::SymbolObject`, or `Tag::Address`, they are ordered (recursively) in
+ the natural order of their corresponding XDR representations:
+ lexicographically by structure field order, sequence order, union
+ discriminant and structure field numerical orders.
+- Otherwise only one of A or B is an object handle:
+ - If either has tag `Tag::U64Small` and the other has tag `Tag::U64Object`,
+ both are compared as their underlying unsigned 64-bit integers.
+  - Similarly when comparing a combination of tags `Tag::I64Small` and
+    `Tag::I64Object`, or `Tag::TimepointSmall` and `Tag::TimepointObject`, or
+    `Tag::DurationSmall` and `Tag::DurationObject`, or `Tag::U128Small` and
+    `Tag::U128Object`, or `Tag::I128Small` and `Tag::I128Object`, or
+    `Tag::U256Small` and `Tag::U256Object`, or `Tag::I256Small` and
+    `Tag::I256Object`, a small-value case and large-value case of the same
+    underlying numeric type are compared in terms of that underlying numeric
+    type.
+ - Similarly if either has tag `Tag::SymbolSmall` and the other has tag
+ `Tag::SymbolObject`, both are compared lexicographically as the underlying
+ sequence of characters in each symbol.
+ - Otherwise some object type and an unrelated non-object type are being
+ compared, so their actual values are ignored and they are compared by the
+ numerical value of the `SCValType` of the un-refined XDR `SCVal` type they
+ represent (i.e. both `Tag::I64Small` and `Tag::I64Object` are projected to
+ their `SCValType` `SCV_I64` for numerical code-comparison with the
+ `SCValType` of the other value).
#### Validity
-The following additional validity constraints are imposed on the XDR types. Values not conforming to these constraints are rejected during conversion to host form:
+The following additional validity constraints are imposed on the XDR types.
+Values not conforming to these constraints are rejected during conversion to
+host form:
- - `SCVal.sym` must consist only of the characters `[_0-9A-Za-z]` and be no longer than `SCSYMBOL_LIMIT` (currently 32 characters).
- - `SCVal.map` and `SCVal.vec` must not be empty (they are optional in the XDR only to enable type-recursion)
- - `SCVal.map` must be populated by `SCMapEntry` pairs in increasing `key`-order, with no duplicate keys.
+- `SCVal.sym` must consist only of the characters `[_0-9A-Za-z]` and be no
+ longer than `SCSYMBOL_LIMIT` (currently 32 characters).
+- `SCVal.map` and `SCVal.vec` must not be empty (they are optional in the XDR
+  only to enable type-recursion).
+- `SCVal.map` must be populated by `SCMapEntry` pairs in increasing
+ `key`-order, with no duplicate keys.
#### Conversion
Conversion from an XDR `SCVal` to a host value `Val` is as follows:
- - The `true` and `false` cases of `SCV_BOOL` are separately encoded as `Val`s with `Tag::True` or `Tag::False`, and zero bodies.
- - The `SCV_VOID` and `SCV_LEDGER_KEY_CONTRACT_INSTANCE` cases are encoded as `Val`s with `Tag::Void` and `Tag::LedgerKeyContractInstance`, respectively, and zero bodies.
- - The `SCV_ERROR` case is encoded as a `Val` with `Tag::Error`, with the `SCErrorType` stored in the `Val`'s minor component and the major component either storing:
- - The `uint32` in the `contractCode` field, if the `SCError` is in case `SCE_CONTRACT`
- - Otherwise the numeric value of the `SCErrorCode` in the `code` field of all other `SCE_*` cases.
- - Case `SCV_U32` is encoded as a `Val` with `Tag::U32`, with the `u32` field stored in its major component.
- - Case `SCV_I32` is encoded as a `Val` with `Tag::I32`, with the `i32` field stored in its major component.
- - Cases `SCV_U64`, `SCV_TIMEPOINT`, `SCV_DURATION`, `SCV_U128`, `SCV_U256` are encoded by first considering whether the underlying numeric value, when considered as an unsigned 64-bit value, fits in 56 bits. If so, it is encoded as a `Val` with `Tag::U64Small`, `Tag::TimepointSmall`, `Tag::DurationSmall`, `Tag::U128Small` or `Tag::U256Small` respectively, with the small unsigned integer value packed into the body. Otherwise they are stored as new host objects and the handle to the object is stored in the major component of a `Val` with `Tag::U64Object`, `Tag::TimepointObject`, `Tag::DurationObject`, `Tag::U128Object` or `Tag::U256Object` respectively.
- - Similarly cases `SCV_I64`, `SCV_I128`, and `SCV_I256` are encoded either as the 56-bit body of `Val`s with their corresponding small value tags `Tag::I64Small`, `Tag::I128Small` or `Tag::I256Small` or as object handles in the 32-bit major component of `Val`s with their corresponding general object tags `Tag::I64Object`, `Tag::I128Object`, `Tag::I256Object` depending on whether thir underlying numeric value, when considered as a signed 64 bit value, can be encoded in 56 bits without data loss.
- - Similarly case `SCV_SYMBOL` is bit-packed as 6 bit codes (as described above) in the body of a `Val` with `Tag::SymbolSmall` if the symbols length is 9 characters or less, otherwise it's stored as a new host object with its handle stored in the major component of a `Val` with `Tag::SymbolObject`.
- - Cases `SCV_BYTES`, `SCV_STRING` and `SCV_ADDRESS` are each stored unconditionally as new host object, with the object handle stored as the major component of a `Val` with `Tag::Bytes`, `Tag::String`, `Tag::Map`, `Tag::Vec` and `Tag::Address` respectively. Each `SCVal` contained within the `map` or `vec` components of the container types, they are converted to host values recursively.
- - Case `SCV_VEC` unconditionally stores a new host object, with the object handle stored as the major component of a `Val` with `Tag::Vec`, but only after _recursively_ converting its contained `SCVal`s to `Val`s using the same rules specified here. In other words the host object stores a vector of _converted_ `Val`s, not unconverted `SCVal`s.
- - Similarly case `SCV_MAP` unconditionally stores a new host object, with the object handle stored as the major component of a `Val` with `Tag::Map`, and only after _recursively_ converting its contained `SCMapEntry`s to _pairs_ of `Val`s using the same rules specified here. In other words the host object stores a vector of pairs of _converted_ `Val`s, not unconverted `SCMapEntry`s or `SCVal`s.
- - Cases `SCV_LEDGER_KEY_NONCE` and `SCV_CONTRACT_INSTANCE` are reserved for host-managed storage keys, and are only ever represented in their XDR form. They therefore do not have corresponding cases in `Tag`, so attempted conversion to `Val` fails with an error.
+- The `true` and `false` cases of `SCV_BOOL` are separately encoded as `Val`s
+ with `Tag::True` or `Tag::False`, and zero bodies.
+- The `SCV_VOID` and `SCV_LEDGER_KEY_CONTRACT_INSTANCE` cases are encoded as
+ `Val`s with `Tag::Void` and `Tag::LedgerKeyContractInstance`, respectively,
+ and zero bodies.
+- The `SCV_ERROR` case is encoded as a `Val` with `Tag::Error`, with the
+ `SCErrorType` stored in the `Val`'s minor component and the major component
+ either storing:
+ - The `uint32` in the `contractCode` field, if the `SCError` is in case
+ `SCE_CONTRACT`
+ - Otherwise the numeric value of the `SCErrorCode` in the `code` field of all
+ other `SCE_*` cases.
+- Case `SCV_U32` is encoded as a `Val` with `Tag::U32`, with the `u32` field
+ stored in its major component.
+- Case `SCV_I32` is encoded as a `Val` with `Tag::I32`, with the `i32` field
+ stored in its major component.
+- Cases `SCV_U64`, `SCV_TIMEPOINT`, `SCV_DURATION`, `SCV_U128`, `SCV_U256` are
+ encoded by first considering whether the underlying numeric value, when
+ considered as an unsigned 64-bit value, fits in 56 bits. If so, it is encoded
+ as a `Val` with `Tag::U64Small`, `Tag::TimepointSmall`, `Tag::DurationSmall`,
+ `Tag::U128Small` or `Tag::U256Small` respectively, with the small unsigned
+ integer value packed into the body. Otherwise they are stored as new host
+ objects and the handle to the object is stored in the major component of a
+ `Val` with `Tag::U64Object`, `Tag::TimepointObject`, `Tag::DurationObject`,
+ `Tag::U128Object` or `Tag::U256Object` respectively.
+- Similarly cases `SCV_I64`, `SCV_I128`, and `SCV_I256` are encoded either as
+ the 56-bit body of `Val`s with their corresponding small value tags
+ `Tag::I64Small`, `Tag::I128Small` or `Tag::I256Small` or as object handles in
+ the 32-bit major component of `Val`s with their corresponding general object
+ tags `Tag::I64Object`, `Tag::I128Object`, `Tag::I256Object` depending on
+  whether their underlying numeric value, when considered as a signed 64-bit
+ value, can be encoded in 56 bits without data loss.
+- Similarly case `SCV_SYMBOL` is bit-packed as 6 bit codes (as described above)
+  in the body of a `Val` with `Tag::SymbolSmall` if the symbol's length is 9
+ characters or less, otherwise it's stored as a new host object with its
+ handle stored in the major component of a `Val` with `Tag::SymbolObject`.
+- Cases `SCV_BYTES`, `SCV_STRING` and `SCV_ADDRESS` are each stored
+  unconditionally as new host objects, with the object handle stored as the
+  major component of a `Val` with `Tag::BytesObject`, `Tag::StringObject` and
+  `Tag::Address` respectively. Each `SCVal` contained within the `map` or
+  `vec` components of the container types is converted to a host value
+  recursively.
+- Case `SCV_VEC` unconditionally stores a new host object, with the object
+  handle stored as the major component of a `Val` with `Tag::VecObject`, but
+  only after _recursively_ converting its contained `SCVal`s to `Val`s using
+  the same rules specified here. In other words the host object stores a
+  vector of _converted_ `Val`s, not unconverted `SCVal`s.
+- Similarly case `SCV_MAP` unconditionally stores a new host object, with the
+  object handle stored as the major component of a `Val` with
+  `Tag::MapObject`, and only after _recursively_ converting its contained
+  `SCMapEntry`s to _pairs_ of `Val`s using the same rules specified here. In
+  other words the host object stores a vector of pairs of _converted_ `Val`s,
+  not unconverted `SCMapEntry`s or `SCVal`s.
+- Cases `SCV_LEDGER_KEY_NONCE` and `SCV_CONTRACT_INSTANCE` are reserved for
+ host-managed storage keys, and are only ever represented in their XDR form.
+ They therefore do not have corresponding cases in `Tag`, so attempted
+ conversion to `Val` fails with an error.
Conversion from a host value `Val` to an XDR `SCVal` is as follows:
- - `Val`s with `Tag::True` or `Tag::False` are encoded as booleans in `SCVal` case `SCV_BOOL`
- - `Val`s with `Tag::Void` and `Tag::LedgerKeyContractInstance` are encoded as the void `SCVal` cases `SCV_VOID` and `SCV_LEDGER_KEY_CONTRACT_INSTANCE`, respectively.
- - `Val`s with case `Tag::Error` are encoded as case `SCV_ERROR` with `SCError` cases chosen by the `Val`'s major component interpreted as an `SCErrorType`:
- - In case `SCE_CONTRACT`, the minor component becomes the `uint32` field `contractCode`
- - In all other `SCE_*` cases, the minor component becomes the `SCErrorCode` field `code`
- - `Val`s with `Tag::U32` are encoded as case `SCV_U32` with the `u32` field taken from the `Val`'s major component interpreted as an unsigned 32-bit integer.
- - `Val`s with `Tag::I32` are encoded as case `SCV_U32` with the `i32` field taken from the `Val`'s major component interpreted as an signed 32-bit integer.
- - `Val`s with `Tag::U64Small`, `Tag::TimepointSmall`, `Tag::DurationSmall`, `Tag::U128Small`, or `Tag::U256Small` are encoded as `SCV_U64`, `SCV_TIMEPOINT`, `SCV_DURATION`, `SCV_U128` and `SCV_U256` with their numeric values taken from the `Val`'s body interpreted as an unsigned 64-bit integer.
- - Similarly, `Val`s with `Tag::I64Small`, `Tag::I128Small`, or `Tag::I256Small` are encoded as `SCV_I64`, `SCV_I128` and `SCV_I256` with their numeric values taken from the `Val`'s body interpreted as a signed 64-bit integer.
- - `Val`s with `Tag::SymbolSmall` are encoded as `SCV_SYMBOL` with characters extracted from the sequence of characters bit-packed into the body of the `Val`.
- - `Val`s that encode object handles are dereferenced and the underlying object is converted back to its unique `SCVal` case: `Tag::U64Object` to `SCV_U64`, `Tag::I64Object` to `SCV_I64`, `Tag::TimepointObject` to `SCV_TIMEPOINT`, `Tag::DurationObject` to `SCV_DURATION`, `Tag::U128Object` to `SCV_U128`, `Tag::I128Object` to `SCV_I128`, `Tag::U256Object` to `SCV_U256`, `Tag::I256Object` to `SCV_I256`, `Tag::SymbolObject` to `SCV_SYMBOL`, `Tag::BytesObject` to `SCV_BYTES`, `Tag::StringObject` to `SCV_STRING`, `Tag::VecObject` to `SCV_VEC`, `Tag::MapObject` to `SCV_MAP`, and `Tag::Address` to `SCV_ADDRESS`. As with conversion into `Val`, converting the container types `Tag::Vec` and `Tag::Map` back to `SCVal`s first recursively convert their contained `Val` elements to `SCVal`s, using the same rules described here.
+
+- `Val`s with `Tag::True` or `Tag::False` are encoded as booleans in `SCVal`
+ case `SCV_BOOL`
+- `Val`s with `Tag::Void` and `Tag::LedgerKeyContractInstance` are encoded as
+ the void `SCVal` cases `SCV_VOID` and `SCV_LEDGER_KEY_CONTRACT_INSTANCE`,
+ respectively.
+- `Val`s with case `Tag::Error` are encoded as case `SCV_ERROR` with `SCError`
+  cases chosen by the `Val`'s minor component interpreted as an `SCErrorType`:
+  - In case `SCE_CONTRACT`, the major component becomes the `uint32` field
+    `contractCode`
+  - In all other `SCE_*` cases, the major component becomes the `SCErrorCode`
+    field `code`
+- `Val`s with `Tag::U32` are encoded as case `SCV_U32` with the `u32` field
+ taken from the `Val`'s major component interpreted as an unsigned 32-bit
+ integer.
+- `Val`s with `Tag::I32` are encoded as case `SCV_I32` with the `i32` field
+  taken from the `Val`'s major component interpreted as a signed 32-bit
+  integer.
+- `Val`s with `Tag::U64Small`, `Tag::TimepointSmall`, `Tag::DurationSmall`,
+ `Tag::U128Small`, or `Tag::U256Small` are encoded as `SCV_U64`,
+ `SCV_TIMEPOINT`, `SCV_DURATION`, `SCV_U128` and `SCV_U256` with their numeric
+ values taken from the `Val`'s body interpreted as an unsigned 64-bit integer.
+- Similarly, `Val`s with `Tag::I64Small`, `Tag::I128Small`, or `Tag::I256Small`
+ are encoded as `SCV_I64`, `SCV_I128` and `SCV_I256` with their numeric values
+ taken from the `Val`'s body interpreted as a signed 64-bit integer.
+- `Val`s with `Tag::SymbolSmall` are encoded as `SCV_SYMBOL` with characters
+ extracted from the sequence of characters bit-packed into the body of the
+ `Val`.
+- `Val`s that encode object handles are dereferenced and the underlying object
+ is converted back to its unique `SCVal` case: `Tag::U64Object` to `SCV_U64`,
+ `Tag::I64Object` to `SCV_I64`, `Tag::TimepointObject` to `SCV_TIMEPOINT`,
+ `Tag::DurationObject` to `SCV_DURATION`, `Tag::U128Object` to `SCV_U128`,
+ `Tag::I128Object` to `SCV_I128`, `Tag::U256Object` to `SCV_U256`,
+ `Tag::I256Object` to `SCV_I256`, `Tag::SymbolObject` to `SCV_SYMBOL`,
+ `Tag::BytesObject` to `SCV_BYTES`, `Tag::StringObject` to `SCV_STRING`,
+ `Tag::VecObject` to `SCV_VEC`, `Tag::MapObject` to `SCV_MAP`, and
+  `Tag::Address` to `SCV_ADDRESS`. As with conversion into `Val`, converting
+  the container types `Tag::VecObject` and `Tag::MapObject` back to `SCVal`s
+  first recursively converts their contained `Val` elements to `SCVal`s,
+  using the same rules described here.
## Design Rationale
### Rationale for Wasm
-WebAssembly was chosen as a basis for this CAP after extensive evaluation of alternative virtual machines. See ["choosing wasm"](https://www.stellar.org/blog/project-jump-cannon-choosing-wasm) for details, or the underlying [stack selection criteria](https://docs.google.com/document/d/1ggXNHVas-PpazfOY87nAz2TiAjH4MkUqHnASR02C6xg/edit#heading=h.p25wrykk29al) document.
+WebAssembly was chosen as a basis for this CAP after extensive evaluation of
+alternative virtual machines. See
+["choosing wasm"](https://www.stellar.org/blog/project-jump-cannon-choosing-wasm)
+for details, or the underlying
+[stack selection criteria](https://docs.google.com/document/d/1ggXNHVas-PpazfOY87nAz2TiAjH4MkUqHnASR02C6xg/edit#heading=h.p25wrykk29al)
+document.
Relative to requirements listed in this CAP, Wasm addresses many of them:
- - Secure:
- - Resource limits: Wasm has good (though not ideal) mechanisms for enforcing resource limits.
- - VM escape and side channels: Wasm is designed as a secure sandbox and has a good security track record so far.
- - Well-defined:
- - Wasm has a rigorous formal semantics and conformance testsuite, it is well specified.
- - Wasm's nondeterminism is narrowly circumscribed and this CAP excludes all cases.
- - Performance:
- - Code size: Wasm code is compact but low level, risks being large. The host-centric data model in this CAP minimizes code size.
- - Optimization: stock compilers emit efficient Wasm code.
- - Interoperable:
- - Multi-language: many PLs have at least preliminary Wasm target support, though only a few are mature enough to use.
- - Tool maturity: languages targeting Wasm -- especially Rust -- have high quality, mature tools.
- - Simple:
- - Non-novelty: Wasm is a complete, mature, well-supported spec with many off-the-shelf implementations to choose from.
- - Compatibility: many Wasm interpreters are written in C++ and/or Rust, can be embedded easily in stellar-core.
- - Learnability: Wasm is not as familiar as EVM but is relatively widely known and appears easy to learn.
+- Secure:
+ - Resource limits: Wasm has good (though not ideal) mechanisms for enforcing
+ resource limits.
+ - VM escape and side channels: Wasm is designed as a secure sandbox and has a
+ good security track record so far.
+- Well-defined:
+ - Wasm has a rigorous formal semantics and conformance testsuite, it is well
+ specified.
+ - Wasm's nondeterminism is narrowly circumscribed and this CAP excludes all
+ cases.
+- Performance:
+ - Code size: Wasm code is compact but low level, risks being large. The
+ host-centric data model in this CAP minimizes code size.
+ - Optimization: stock compilers emit efficient Wasm code.
+- Interoperable:
+ - Multi-language: many PLs have at least preliminary Wasm target support,
+ though only a few are mature enough to use.
+ - Tool maturity: languages targeting Wasm -- especially Rust -- have high
+ quality, mature tools.
+- Simple:
+ - Non-novelty: Wasm is a complete, mature, well-supported spec with many
+ off-the-shelf implementations to choose from.
+ - Compatibility: many Wasm interpreters are written in C++ and/or Rust, can
+ be embedded easily in stellar-core.
+ - Learnability: Wasm is not as familiar as EVM but is relatively widely known
+ and appears easy to learn.
### Rationale for host value / host object split
-The split between host value types (`Val`s that can traverse the host/guest interface) and host objects (that remain on the host side, are identified only by handles, and are managed by host functions) is justified as a response to a number of observations we made when considering existing blockchains:
-
- - Many systems spend a lot of guest code footprint (time and space) implementing data serialization and deserialization to and from opaque byte arrays. This code suffers from a variety of problems:
- - It is often to and from an opaque, non-standard or contract-specific format, making a contract's data difficult to browse or debug, and making SDKs that invoke contracts need to carry special code to serialize and deserialize data for the contract.
- - It is often coupled to a specific version or layout of a data structure, such that data cannot be easily be migrated between versions of a contract.
- - It requires that a contract potentially contains extra copies of serialization support code for the formats used by any contracts it calls.
- - It is often intermixed with argument processing and contract logic, representing a significant class of security problems in contracts.
- - It is usually unshared code: each contract implements its own copy of serialization and deserialization, and does so inefficiently in the guest rather than efficiently on the host.
-
- - Similarly, when guest code is CPU-intensive it is often performing numerical or cryptographic operations which would be better supported by a common library of efficient (native) host functions.
-
- - As of this writing, Wasm defines no standardized, mature, widely-supported mechanism of directly sharing code, which makes it impossible to reuse common guest functions needed by many contracts. Possibly in the future the [Wasm component model](https://github.com/WebAssembly/component-model) may present such a mechanism for sharing code between modules, but at present it is still incomplete and not widely implemented. Sharing common host functions is comparatively straightforward, and much more so if we define a common data model on which host functions operate.
-
- - The more time is spent in the guest, the more the overall system performance depends directly on the speed of the guest VM's bytecode-dispatch mechanism (a.k.a. the VM's "inner loop"). By contrast, if the guest VM spends most of its time making a sequence of host calls, the bytecode-dispatch speed of the guest VM is less of a concern. This gives us much more flexibility in choice of VM, for example to choose simple, low-latency and comparatively-secure interpreters rather than complex, high-latency and fragile JITs.
-
-Some systems mitigate these issues by providing byte-buffers of data to guests in a guaranteed input format, such as JSON. This eliminates some of the interoperability concerns but none of the efficiency concerns: the guest still spends too much time parsing input and building data structures.
-
-Ultimately we settled on an approach in which the system will spend _as little time in the guest as possible_, and will furnish the guest with a rich enough repertoire of host objects that it should not need many or any of its own guest-local data structures. Our experience suggests that many guests will be able to run without a guest memory allocator at all.
-
-There are various costs and benefits to this strategy. We compared in detail to many other blockchains with different approaches before settling on this one.
+The split between host value types (`Val`s that can traverse the host/guest
+interface) and host objects (that remain on the host side, are identified only
+by handles, and are managed by host functions) is justified as a response to a
+number of observations we made when considering existing blockchains:
+
+- Many systems spend a lot of guest code footprint (time and space)
+ implementing data serialization and deserialization to and from opaque byte
+ arrays. This code suffers from a variety of problems:
+ - It is often to and from an opaque, non-standard or contract-specific
+ format, making a contract's data difficult to browse or debug, and making
+ SDKs that invoke contracts need to carry special code to serialize and
+ deserialize data for the contract.
+ - It is often coupled to a specific version or layout of a data structure,
+    such that data cannot easily be migrated between versions of a contract.
+ - It requires that a contract potentially contains extra copies of
+ serialization support code for the formats used by any contracts it calls.
+ - It is often intermixed with argument processing and contract logic,
+ representing a significant class of security problems in contracts.
+ - It is usually unshared code: each contract implements its own copy of
+ serialization and deserialization, and does so inefficiently in the guest
+ rather than efficiently on the host.
+
+- Similarly, when guest code is CPU-intensive it is often performing numerical
+ or cryptographic operations which would be better supported by a common
+ library of efficient (native) host functions.
+
+- As of this writing, Wasm defines no standardized, mature, widely-supported
+ mechanism of directly sharing code, which makes it impossible to reuse common
+ guest functions needed by many contracts. Possibly in the future the
+ [Wasm component model](https://github.com/WebAssembly/component-model) may
+ present such a mechanism for sharing code between modules, but at present it
+ is still incomplete and not widely implemented. Sharing common host functions
+ is comparatively straightforward, and much more so if we define a common data
+ model on which host functions operate.
+
+- The more time is spent in the guest, the more the overall system performance
+ depends directly on the speed of the guest VM's bytecode-dispatch mechanism
+ (a.k.a. the VM's "inner loop"). By contrast, if the guest VM spends most of
+ its time making a sequence of host calls, the bytecode-dispatch speed of the
+ guest VM is less of a concern. This gives us much more flexibility in choice
+ of VM, for example to choose simple, low-latency and comparatively-secure
+ interpreters rather than complex, high-latency and fragile JITs.
+
+Some systems mitigate these issues by providing byte-buffers of data to guests
+in a guaranteed input format, such as JSON. This eliminates some of the
+interoperability concerns but none of the efficiency concerns: the guest still
+spends too much time parsing input and building data structures.
+
+Ultimately we settled on an approach in which the system will spend _as little
+time in the guest as possible_, and will furnish the guest with a rich enough
+repertoire of host objects that it should not need many or any of its own
+guest-local data structures. Our experience suggests that many guests will be
+able to run without a guest memory allocator at all.
+
+There are various costs and benefits to this strategy. We compared in detail to
+many other blockchains with different approaches before settling on this one.
Costs:
- - Larger host-object API attack surface to defend.
- - Larger host-object API compatibility surface to maintain.
- - More challenging task to quantify memory and CPU costs.
- - More specification work to do defining host interface.
- - Risks redundant work, guest _may_ choose to ignore host objects.
+
+- Larger host-object API attack surface to defend.
+- Larger host-object API compatibility surface to maintain.
+- More challenging task to quantify memory and CPU costs.
+- More specification work to do defining host interface.
+- Risks redundant work, guest _may_ choose to ignore host objects.
Benefits:
- - Much faster execution due to more logic being in natively-compiled host Rust code.
- - Smaller guest input-parsing attack surfaces to defend.
- - Smaller guest data compatibility surfaces to maintain.
- - Much smaller guest code, minimizing storage and instantiation costs:
- - Little or no code to serialize or deserialize data in guest.
- - Little or no common memory-management or data structure code in guest.
- - Auxiliary benefits from common data model:
- - Easier to browse contract data by 3rd party tools.
- - Easier to debug contracts by inspecting state.
- - Easier to test contracts by generating / capturing data.
- - Easier to pass data from one contract to another.
- - Easier to use same data model from different source languages.
-
-It is especially important to note that the (enlarged) attack and maintenance surfaces on the host are costs borne by Soroban's developers, while the (diminished) attack and maintenance surfaces are benefits that accrue to smart contract developers. We believe this is a desirable balance of costs and benefits, as contract developers are likely to significantly outnumber Soroban developers.
+
+- Much faster execution due to more logic being in natively-compiled host Rust
+ code.
+- Smaller guest input-parsing attack surfaces to defend.
+- Smaller guest data compatibility surfaces to maintain.
+- Much smaller guest code, minimizing storage and instantiation costs:
+ - Little or no code to serialize or deserialize data in guest.
+ - Little or no common memory-management or data structure code in guest.
+- Auxiliary benefits from common data model:
+ - Easier to browse contract data by 3rd party tools.
+ - Easier to debug contracts by inspecting state.
+ - Easier to test contracts by generating / capturing data.
+ - Easier to pass data from one contract to another.
+ - Easier to use same data model from different source languages.
+
+It is especially important to note that the (enlarged) attack and maintenance
+surfaces on the host are costs borne by Soroban's developers, while the
+(diminished) attack and maintenance surfaces are benefits that accrue to smart
+contract developers. We believe this is a desirable balance of costs and
+benefits, as contract developers are likely to significantly outnumber Soroban
+developers.
### Rationale for value and object type repertoires
These are chosen based on two criteria:
- - Reasonably-foreseeable use in a large number of smart contracts.
- - Widely-available implementations with efficient immutable forms.
+- Reasonably-foreseeable use in a large number of smart contracts.
+- Widely-available implementations with efficient immutable forms.
-In addition, _values_ are constrained by the ability to be packed into a 64-bit tagged disjoint union. Special cases for common small values such as symbols, booleans, integer types and error codes are provided on the basis of presumed utility in a variety of contexts.
+In addition, _values_ are constrained by the ability to be packed into a 64-bit
+tagged disjoint union. Special cases for common small values such as symbols,
+booleans, integer types and error codes are provided on the basis of presumed
+utility in a variety of contexts.
#### Numeric types
-The value repertoire includes **signed and unsigned integer types** as its sole number types:
- - 32 and 64-bit types, as these are standard Wasm types and useful for most purposes
- - 128-bit types, which are natively supported by Rust (the host and guest language Soroban ships with support for). This type is also large enough to act as a very high precision fixed-point number for currency calculations: 19 decimal digits on either side of the decimal point. As this is larger than the standard 18 decimal places used by default by Ethereum's ERC20 token standard, 128-bit integers are used by Soroban's native contract interface as a common type for expressing quantities.
- - 256-bit types, which are useful for two distinct reasons:
- - For interoperation with Ethereum or other 256-bit integer blockchains
- - To store and operate on various cryptographic values as scalars: several hash functions and encryption functions use 256-bit values as inputs or outputs, and it is frequently convenient to perform 256-bit integer-arithmetic or bitwise operations when working with those functions.
-
-Two additional integral-wrapper types -- `Duration` and `TimePoint` -- exist merely for the sake of avoiding errors and meaningful display formatting when working with time values (eg. to hint to a user interface to display a `TimePoint` as `2023-08-24T04:00:18+00:00` rather than `1692874818`). Internally both types are `u64`.
-
-Floating-point arithmetic is disabled in the Wasm VM, and floating-point types are not used anywhere in the `SCVal` value repertoire or the host interface, out of concern for nondeterminism and survey feedback from potential users that they would not be used.
-
-Fixed-point arithmetic functions could potentially be provided in the host, but feedback during development indicated that most users would be doing fixed point calculations with the 128-bit type, which is expected to remain on the guest as a 128-bit guest arithmetic operation costs roughly the same amount of CPU work as a host call. Users are therefore encouraged to simply include their own fixed-point library code in contracts. Some support code for this may be added to the Soroban guest SDK.
+The value repertoire includes **signed and unsigned integer types** as its sole
+number types:
+
+- 32 and 64-bit types, as these are standard Wasm types and useful for most
+ purposes
+- 128-bit types, which are natively supported by Rust (the host and guest
+ language Soroban ships with support for). This type is also large enough to
+ act as a very high precision fixed-point number for currency calculations: 19
+ decimal digits on either side of the decimal point. As this is larger than
+ the standard 18 decimal places used by default by Ethereum's ERC20 token
+ standard, 128-bit integers are used by Soroban's native contract interface as
+ a common type for expressing quantities.
+- 256-bit types, which are useful for two distinct reasons:
+ - For interoperation with Ethereum or other 256-bit integer blockchains
+ - To store and operate on various cryptographic values as scalars: several
+ hash functions and encryption functions use 256-bit values as inputs or
+ outputs, and it is frequently convenient to perform 256-bit
+ integer-arithmetic or bitwise operations when working with those functions.
+
+Two additional integral-wrapper types -- `Duration` and `TimePoint` -- exist
+merely for the sake of avoiding errors and meaningful display formatting when
+working with time values (e.g. to hint to a user interface to display a
+`TimePoint` as `2023-08-24T04:00:18+00:00` rather than `1692874818`).
+Internally both types are `u64`.
+
+Floating-point arithmetic is disabled in the Wasm VM, and floating-point types
+are not used anywhere in the `SCVal` value repertoire or the host interface,
+out of concern for nondeterminism and survey feedback from potential users that
+they would not be used.
+
+Fixed-point arithmetic functions could potentially be provided in the host, but
+feedback during development indicated that most users would be doing fixed
+point calculations with the 128-bit type, which is expected to remain on the
+guest as a 128-bit guest arithmetic operation costs roughly the same amount of
+CPU work as a host call. Users are therefore encouraged to simply include their
+own fixed-point library code in contracts. Some support code for this may be
+added to the Soroban guest SDK.
#### Container types
-Implementations of the map and vector object types are based on Rust's standard vector type, are always precisely sized to their data and immutable once constructed. The map type is a sorted vector of key-value pairs that is binary searched during map lookup, but otherwise lacks any advanced structure.
-
-Earlier versions of this CAP suggested the use of container objects with "shared substructure" such as HAMTs, functional red-black trees or RRBs. These were used early in Soroban's development, but it was observed that most host objects were small due to pressure from the persistent storage system and transaction system, and the overhead of objects with shared substructure exceeded the cost of a simpler approach of merely duplicating objects in full every time they are modified. As a result, the simpler approach was adopted.
-
-Containers **are** nonetheless converted from their XDR forms to internal forms. The host's internal form of an `SCVec` is a vector of `Val` host values, each only 64 bits, rather than a vector of arbitrarily large `SCVal`s. Similar the host's internal form of an `SCMap` is a map of pairs of `Val` host values. In both cases this helps minimize the size overhead of the (frequently duplicated) host containers, and simplifies accounting for operations on them, since all `Val`s within them are the same small size.
+Implementations of the map and vector object types are based on Rust's standard
+vector type, are always precisely sized to their data and immutable once
+constructed. The map type is a sorted vector of key-value pairs that is binary
+searched during map lookup, but otherwise lacks any advanced structure.
+
+Earlier versions of this CAP suggested the use of container objects with
+"shared substructure" such as HAMTs, functional red-black trees or RRBs. These
+were used early in Soroban's development, but it was observed that most host
+objects were small due to pressure from the persistent storage system and
+transaction system, and the overhead of objects with shared substructure
+exceeded the cost of a simpler approach of merely duplicating objects in full
+every time they are modified. As a result, the simpler approach was adopted.
+
+Containers **are** nonetheless converted from their XDR forms to internal
+forms. The host's internal form of an `SCVec` is a vector of `Val` host values,
+each only 64 bits, rather than a vector of arbitrarily large `SCVal`s.
+Similarly, the host's internal form of an `SCMap` is a map of pairs of `Val`
+host values.
+In both cases this helps minimize the size overhead of the (frequently
+duplicated) host containers, and simplifies accounting for operations on them,
+since all `Val`s within them are the same small size.
#### Buffer types
-Three types in the `SCVal` / `Val` repertoire are all variations on "a byte buffer":
-
- - `Bytes` which carries no implication about its content. This is the most general type.
- - `String` which carries an implication that its content is text in some format (most likely UTF-8 unicode). No structure is _mandated_ for `String` but at a user-interface level it is often helpful to parse and display text differently from general byte sequences.
- - `Symbol` is like `String` but imposes additional constraints: a maximum size of 32 characters, and a repertoire of characters drawn from the set `[a-zA-Z0-9_]`. The size limit is imposed to help support `Symbol`s in guest code without needing a heap allocator. The limited repertoire is chosen for several reasons:
- - It is visually unambiguous in many typefaces, and so reduces the security risks from confusible Unicode codepoints or non-canonical code sequences, which can result in `String`s that "look the same" but contain different bytes.
- - It has only 63 codes, which (combined with a code for null) is small enough to be packed into 6 bits, which in turn enables bit-packing small 9 character XDR `Symbol`s into the body of the `SymbolSmall` case of the host `Val` type, an important space optimization as `Symbol`s are relatively ubiquitous.
- - It is a widely-used repertoire in surveys of the ecosystem and legacy systems: it covers most program identifiers, such as datatype and function names, as well as most asset identifier codes.
+Three types in the `SCVal` / `Val` repertoire are all variations on "a byte
+buffer":
+
+- `Bytes` which carries no implication about its content. This is the most
+ general type.
+- `String` which carries an implication that its content is text in some format
+ (most likely UTF-8 unicode). No structure is _mandated_ for `String` but at a
+ user-interface level it is often helpful to parse and display text
+ differently from general byte sequences.
+- `Symbol` is like `String` but imposes additional constraints: a maximum size
+ of 32 characters, and a repertoire of characters drawn from the set
+ `[a-zA-Z0-9_]`. The size limit is imposed to help support `Symbol`s in guest
+ code without needing a heap allocator. The limited repertoire is chosen for
+ several reasons:
+ - It is visually unambiguous in many typefaces, and so reduces the security
+ risks from confusible Unicode codepoints or non-canonical code sequences,
+ which can result in `String`s that "look the same" but contain different
+ bytes.
+ - It has only 63 codes, which (combined with a code for null) is small enough
+ to be packed into 6 bits, which in turn enables bit-packing small 9
+ character XDR `Symbol`s into the body of the `SymbolSmall` case of the host
+ `Val` type, an important space optimization as `Symbol`s are relatively
+ ubiquitous.
+ - It is a widely-used repertoire in surveys of the ecosystem and legacy
+ systems: it covers most program identifiers, such as datatype and function
+ names, as well as most asset identifier codes.
### Rationale for separate XDR and host forms
-It would be possible to store all data in memory in the host in its XDR format, but we choose instead to define a separate "host form" for both values and objects in this specification for the following reasons:
-
- - In the host form, values are bit-packed in order to fit in exactly 64 bits. This bit-packing is implemented in Rust code in the Soroban host (and _partially_ available to Rust guest code) but many parts of it are host-specific, and quite delicate, and would in any case be undesirable to reimplement in every client SDK and data browser. In the XDR form, the various cases that make up the value union are represented in a standard XDR union, which is automatically supported by many languages' XDR bindings.
-
- - In the host form, objects and values are separated for reasons explained above, and their separation is mediated through object _handles_ and the _host environment_ that maps references to objects. In the XDR form, objects and values are _not_ separated, because they should not be: there is no implicit context in which to resolve handles, and even if there were it would introduce a new category of potential handle-mismatch error in the serialized form to support it. Instead, in the XDR form values _directly contain_ objects.
-
- - As mentioned above, containers in the host form are actually be more efficient and simpler to work with having been converted from containers of XDR `SCVal`s to containers of host `Val`s.
+It would be possible to store all data in memory in the host in its XDR format,
+but we choose instead to define a separate "host form" for both values and
+objects in this specification for the following reasons:
+
+- In the host form, values are bit-packed in order to fit in exactly 64 bits.
+ This bit-packing is implemented in Rust code in the Soroban host (and
+ _partially_ available to Rust guest code) but many parts of it are
+ host-specific, and quite delicate, and would in any case be undesirable to
+ reimplement in every client SDK and data browser. In the XDR form, the
+ various cases that make up the value union are represented in a standard XDR
+ union, which is automatically supported by many languages' XDR bindings.
+
+- In the host form, objects and values are separated for reasons explained
+ above, and their separation is mediated through object _handles_ and the
+ _host environment_ that maps references to objects. In the XDR form, objects
+ and values are _not_ separated, because they should not be: there is no
+ implicit context in which to resolve handles, and even if there were it would
+ introduce a new category of potential handle-mismatch error in the serialized
+ form to support it. Instead, in the XDR form values _directly contain_
+ objects.
+
+- As mentioned above, containers in the host form are actually more efficient
+  and simpler to work with having been converted from containers of XDR
+  `SCVal`s to containers of host `Val`s.
### Rationale for immutable objects
-We considered the potential costs and benefits of immutable objects, and decided in favor of them.
+We considered the potential costs and benefits of immutable objects, and
+decided in favor of them.
Costs:
- - More memory allocation.
- - Risk of referring to an old/stale object rather than a fresh/new one.
+
+- More memory allocation.
+- Risk of referring to an old/stale object rather than a fresh/new one.
Benefits:
- - Reduced risk of error through mutating a shared object.
- - Stable total order, for using structured values as map keys.
- - Simple model of security: no covert channels, only passed values.
- - Simple model for transactions: discard objects on rollback.
-Since we expect smart contracts to run to completion very quickly, and then free all objects allocated, we do not consider the additional memory allocation cost a likely problem in practice. Furthermore as mentioned in the object-repertoire rationale above, most objects are small.
+- Reduced risk of error through mutating a shared object.
+- Stable total order, for using structured values as map keys.
+- Simple model of security: no covert channels, only passed values.
+- Simple model for transactions: discard objects on rollback.
-Therefore the only real risk we foresee is the increased risk of unintentionally referring to an old/stale object, and we believe this is outweighed by the reduced risk of unintentionally referring to a shared mutable object that it mutated through an alias.
+Since we expect smart contracts to run to completion very quickly, and then
+free all objects allocated, we do not consider the additional memory allocation
+cost a likely problem in practice. Furthermore as mentioned in the
+object-repertoire rationale above, most objects are small.
+Therefore the only real risk we foresee is the increased risk of
+unintentionally referring to an old/stale object, and we believe this is
+outweighed by the reduced risk of unintentionally referring to a shared
+mutable object that is mutated through an alias.
## Protocol Upgrade Transition
-The initial protocol upgrade to enable Soroban is outside the scope of this CAP, as it will simply enable Soroban transaction types where no previous Soroban transactions were allowed.
-
-Subsequent protocol upgrades must be carefully managed to ensure compatibility. Specifically the following mechanisms will assist in maintaining compatibility across upgrades:
-
- 1. Every contract must carry a custom Wasm section called `contractenvmetav0`. This section must contain the serialized bytes of a sequence of the XDR type `SCEnvMetaEntry` which is a union switching on `SCEnvMetaEntryKind` that, initially, only contains a single possible case `SC_ENV_META_KIND_INTERFACE_VERSION`. This carries a `uint64` that defines an "interface version" of the contract, which encodes both a protocol version number (in the high 32 bits) and a prerelease number (in the low 32 bits). The prerelease number is only meaningful during Soroban's development and must be zero once Soroban is enabled. The SDK currently arranges to include this information automatically, based on the version of the Rust `soroban-env-common` crate it is compiled against.
- 2. A contract's protocol number indicates the minimum required protocol for a contract to run, and is checked by the host when instantiating the contract: instantiating a contract with an unsupported protocol number results in an error before execution.
- 3. Extensions to the host interface will always be accompanied by a protocol change. This allows contracts to be deployed before they are fully supported, and to activate only when the network votes to support new features.
- 4. If the host needs to intentionally deprecate or change the behaviour of any host function or any other aspect of the host interface, it should also accompany this change with a protocol change. Since historical ledgers always specify the protocol number they were recorded under, marking different ledgers with different protocols is the intended (and only reliable) way to enable the host to switch between different forms of logic, replaying old ledgers on old backward-compatibility logic and new ledgers on new logic.
- 5. To minimize the risk of _unintentional_ changes to the host's logic (and divergence among versions) entering the network due to, say, periodic software maintenance and dependency updates, the host is designed to support (and stellar-core is equipped to provide) _multiversioning_: to embed two full copies of the entire transitive tree of software dependencies of the host in process simultaneously, and to "switch over" between one version and another instantaneously, during a protocol upgrade. This allows delaying and then grouping together "all potentially risky" changes to dependencies until the next protocol-upgrade boundary, and then deploying them all simultaneously across the network. In other words, it is expected that the Soroban host will remain relatively static between protocol versions, only taking very minor updates that we have high certainty in the identical observable semantics of.
-
- The process of safely upgrading the network with Soroban enabled is described in more detail in [this document inside the stellar-core repository](https://github.com/stellar/stellar-core/blob/master/docs/versioning-soroban.md).
+The initial protocol upgrade to enable Soroban is outside the scope of this
+CAP, as it will simply enable Soroban transaction types where no previous
+Soroban transactions were allowed.
+
+Subsequent protocol upgrades must be carefully managed to ensure compatibility.
+Specifically the following mechanisms will assist in maintaining compatibility
+across upgrades:
+
+1. Every contract must carry a custom Wasm section called `contractenvmetav0`.
+ This section must contain the serialized bytes of a sequence of the XDR type
+ `SCEnvMetaEntry` which is a union switching on `SCEnvMetaEntryKind` that,
+ initially, only contains a single possible case
+ `SC_ENV_META_KIND_INTERFACE_VERSION`. This carries a `uint64` that defines
+ an "interface version" of the contract, which encodes both a protocol
+ version number (in the high 32 bits) and a prerelease number (in the low 32
+ bits). The prerelease number is only meaningful during Soroban's development
+ and must be zero once Soroban is enabled. The SDK currently arranges to
+ include this information automatically, based on the version of the Rust
+ `soroban-env-common` crate it is compiled against.
+2. A contract's protocol number indicates the minimum required protocol for a
+ contract to run, and is checked by the host when instantiating the contract:
+ instantiating a contract with an unsupported protocol number results in an
+ error before execution.
+3. Extensions to the host interface will always be accompanied by a protocol
+ change. This allows contracts to be deployed before they are fully
+ supported, and to activate only when the network votes to support new
+ features.
+4. If the host needs to intentionally deprecate or change the behaviour of any
+ host function or any other aspect of the host interface, it should also
+ accompany this change with a protocol change. Since historical ledgers
+ always specify the protocol number they were recorded under, marking
+ different ledgers with different protocols is the intended (and only
+ reliable) way to enable the host to switch between different forms of logic,
+ replaying old ledgers on old backward-compatibility logic and new ledgers on
+ new logic.
+5. To minimize the risk of _unintentional_ changes to the host's logic (and
+ divergence among versions) entering the network due to, say, periodic
+ software maintenance and dependency updates, the host is designed to support
+ (and stellar-core is equipped to provide) _multiversioning_: to embed two
+ full copies of the entire transitive tree of software dependencies of the
+ host in process simultaneously, and to "switch over" between one version and
+ another instantaneously, during a protocol upgrade. This allows delaying and
+ then grouping together "all potentially risky" changes to dependencies until
+ the next protocol-upgrade boundary, and then deploying them all
+ simultaneously across the network. In other words, it is expected that the
+ Soroban host will remain relatively static between protocol versions, only
+ taking very minor updates that we have high certainty in the identical
+ observable semantics of.
+
+The process of safely upgrading the network with Soroban enabled is described
+in more detail in
+[this document inside the stellar-core repository](https://github.com/stellar/stellar-core/blob/master/docs/versioning-soroban.md).
### Backwards Incompatibilities
+
This CAP does not introduce any backward incompatibilities.
### Resource Utilization
+
TBD. Performance evaluation is ongoing on in-progress implementation.
## Security Concerns
-In order to describe the security implications of this CAP we use the [STRIDE](https://en.wikipedia.org/wiki/STRIDE_(security)) methodology. This is a common framework used in the industry to identify security threats.
-For each categories we use attack scenarios to better explain the threat.
-- **Spoofing**: Attackers are able to let the system believe they are privileged users
- - A logical vulnerability exists in the Wasm code of the smart contract and lets a standard user perform privileged tasks
- - A logical vulnerability exists in a host function and leads to a failure in access control checks
-- **Tampering**: Attackers are able to modify unauthorized data in the ledger database
- - A write-anywhere vulnerability exists in the Wasm interpreter. A specially crafted Wasm code triggers this bug and lets a user write custom data in the host memory which then get reflected in the database
- - A write-anywhere vulnerability exists in a host function. A smart-contract code calls the vulnerable host function and triggers the vulnerability. A user calls the smart-contract and uses it to write custom data in the host memory or directly in the database
- - A logical vulnerability exists in the implementation of the serialization and deserialization of the data model. A smart-contract code instantiates specific objects on the host side and triggers the vulnerable part of the serializer to tamper with the data saved in the database
+
+In order to describe the security implications of this CAP we use the
+[STRIDE](<https://en.wikipedia.org/wiki/STRIDE_(security)>) methodology. This
+is a common framework used in the industry to identify security threats. For
+each category we use attack scenarios to better explain the threat.
+
+- **Spoofing**: Attackers are able to let the system believe they are
+ privileged users
+ - A logical vulnerability exists in the Wasm code of the smart contract and
+ lets a standard user perform privileged tasks
+ - A logical vulnerability exists in a host function and leads to a failure in
+ access control checks
+- **Tampering**: Attackers are able to modify unauthorized data in the ledger
+ database
+ - A write-anywhere vulnerability exists in the Wasm interpreter. A specially
+ crafted Wasm code triggers this bug and lets a user write custom data in
+ the host memory which then get reflected in the database
+ - A write-anywhere vulnerability exists in a host function. A smart-contract
+ code calls the vulnerable host function and triggers the vulnerability. A
+ user calls the smart-contract and uses it to write custom data in the host
+ memory or directly in the database
+ - A logical vulnerability exists in the implementation of the serialization
+ and deserialization of the data model. A smart-contract code instantiates
+ specific objects on the host side and triggers the vulnerable part of the
+ serializer to tamper with the data saved in the database
- **Repudiation**: _Not applicable here_
-- **Information disclosure**: Attackers are able to access unauthorized information on the validators (secret seed for example), on the ledger database (other smart contract data) or guest memory data from another contract:
- - A read-anywhere vulnerability exists in the Wasm interpreter. A specially crafted Wasm code triggers this vulnerability and lets a user read custom data in the host memory
- - A read-anywhere vulnerability exists in a host function. A smart-contract code calls the vulnerable host function and triggers the vulnerability. A user calls the smart-contract and uses it to read custom data in the host memory
- - During a smart contract execution a function from another smart contract is called. This call exploits a read-anywhere vulnerability in the access control checks of new contract data. This result in the caller contract being able to programmatically access the data of the callee contract. This is an issue for contracts like Oracles.
+- **Information disclosure**: Attackers are able to access unauthorized
+ information on the validators (secret seed for example), on the ledger
+ database (other smart contract data) or guest memory data from another
+ contract:
+ - A read-anywhere vulnerability exists in the Wasm interpreter. A specially
+ crafted Wasm code triggers this vulnerability and lets a user read custom
+ data in the host memory
+ - A read-anywhere vulnerability exists in a host function. A smart-contract
+ code calls the vulnerable host function and triggers the vulnerability. A
+ user calls the smart-contract and uses it to read custom data in the host
+ memory
+ - During a smart contract execution a function from another smart contract is
+ called. This call exploits a read-anywhere vulnerability in the access
+    control checks of new contract data. This results in the caller contract
+ being able to programmatically access the data of the callee contract. This
+ is an issue for contracts like Oracles.
- **Denial of Service**: Network halts because consensus cannot be reached
- - A logical vulnerability exists in the implementation which validates that only deterministic Wasm code is executed. A specially crafted Wasm code triggers this vulnerability and creates nondeterminism across the network
- - A logical vulnerability exists in the implementation which compute the amount of gas needed to execute a smart-contract code. A smart-contract code exploits this vulnerability and requires too many computing resources for the validators, preventing them to close the ledger in an acceptable time frame
-- **Elevation of privilege**: Attackers are able to execute non authorized code on the validators
- - A code execution vulnerability exists in the Wasm interpreter. A specially crafted Wasm code triggers this vulnerability and lets a user execute code within the host context (stellar-core process)
- - A code execution vulnerability exists in a host function. A smart-contract code calls the vulnerable host function and triggers the vulnerability. A user calls the smart-contract and uses it to execute code within the host context (stellar-core process)
-
+ - A logical vulnerability exists in the implementation which validates that
+ only deterministic Wasm code is executed. A specially crafted Wasm code
+ triggers this vulnerability and creates nondeterminism across the network
+  - A logical vulnerability exists in the implementation which computes the
+ amount of gas needed to execute a smart-contract code. A smart-contract
+ code exploits this vulnerability and requires too many computing resources
+    for the validators, preventing them from closing the ledger in an
+    acceptable time frame
+- **Elevation of privilege**: Attackers are able to execute non authorized code
+ on the validators
+ - A code execution vulnerability exists in the Wasm interpreter. A specially
+ crafted Wasm code triggers this vulnerability and lets a user execute code
+ within the host context (stellar-core process)
+ - A code execution vulnerability exists in a host function. A smart-contract
+ code calls the vulnerable host function and triggers the vulnerability. A
+ user calls the smart-contract and uses it to execute code within the host
+ context (stellar-core process)
## Test Cases
+
TBD. See in-progress implementation.
## Implementation
An implementation is provided in two parts:
- 1. The [rs-soroban-env repository](https://github.com/stellar/rs-soroban-env) which contains three Rust crates defining:
- - `soroban-env-host`: a Rust implementation of the host environment
- - `soroban-env-guest`: a Rust interface for Rust guest code to interact with the host environment
- - `soroban-env-common`: a set of definitions common to both
- 2. The [stellar-core repository](https://github.com/stellar/stellar-core/) which contains (by reference) the XDR definitions above and provides an embedding of the `soroban-env-host` crate inside `stellar-core`.
+1. The [rs-soroban-env repository](https://github.com/stellar/rs-soroban-env)
+ which contains three Rust crates defining:
+ - `soroban-env-host`: a Rust implementation of the host environment
+ - `soroban-env-guest`: a Rust interface for Rust guest code to interact with
+ the host environment
+ - `soroban-env-common`: a set of definitions common to both
+
+2. The [stellar-core repository](https://github.com/stellar/stellar-core/)
+ which contains (by reference) the XDR definitions above and provides an
+ embedding of the `soroban-env-host` crate inside `stellar-core`.
diff --git a/core/cap-0046-02.md b/core/cap-0046-02.md
index 4e2239729..af32b83f3 100644
--- a/core/cap-0046-02.md
+++ b/core/cap-0046-02.md
@@ -24,8 +24,8 @@ See the Soroban overview CAP.
## Abstract
-Users need a way to manage smart contracts on the network. This CAP allows users
-to deploy the smart contracts to the network and specifies the supported
+Users need a way to manage smart contracts on the network. This CAP allows
+users to deploy the smart contracts to the network and specifies the supported
contract code kinds.
## Specification
@@ -40,27 +40,28 @@ See the XDR diffs in the Soroban overview CAP, specifically those referring to
### Contract structure
-This defines the terms we use in the following sections without going into their
-design and implementation details.
+This defines the terms we use in the following sections without going into
+their design and implementation details.
#### Contract source
-Contract source can be thought of as a 'class' of a contract. Multiple contracts
-can share the same source, but have their own state. Thanks to that sharing
-capability, we can reduce the amount of duplication in ledger and only store
-unique contract sources.
+Contract source can be thought of as a 'class' of a contract. Multiple
+contracts can share the same source, but have their own state. Thanks to that
+sharing capability, we can reduce the amount of duplication in ledger and only
+store unique contract sources.
This CAP defines two possible kinds of contract sources:
-- Wasm source: a blob of Wasm code that is stored in a separate ledger entry and
- is deduplicated based on contents. This is uploaded to ledger by the users.
+- Wasm source: a blob of Wasm code that is stored in a separate ledger entry
+ and is deduplicated based on contents. This is uploaded to ledger by the
+ users.
- Built-in contract: this is a 'source' compiled into host directly that has a
protocol-defined interface and behavior.
#### Contract executable
-The Contract executable contains a pointer to the Wasm source or a tag of a built-in
-contract.
+The Contract executable contains a pointer to the Wasm source or a tag of a
+built-in contract.
#### Contract instance
@@ -68,8 +69,8 @@ Contract instance can be thought of as an instance of the contract 'class'.
Contract instance consists of:
- A `ContractExecutable`.
-- An optional `SCMap*` for users to store data
-that should be tied to the lifetime of the instance (./cap-0046-02.md).
+- An optional `SCMap*` for users to store data that should be tied to the
+ lifetime of the instance (./cap-0046-02.md).
A contract instance may own an arbitrary amount of ledger entries attributed to
its identifier. Contracts that share the same source in no way may influence
@@ -85,10 +86,10 @@ other hashes in the protocol. It contains the `Hash` of the `networkID` to
ensure that every network has unique set of contract identifiers, along with a
`ContractIDPreimage`, which is a union that supports both
`CONTRACT_ID_PREIMAGE_FROM_ADDRESS` and `CONTRACT_ID_PREIMAGE_FROM_ASSET`.
+
- `CONTRACT_ID_PREIMAGE_FROM_ADDRESS`: built from an an `SCAddress` and the
user-specified `uint256` salt.
-- `CONTRACT_ID_PREIMAGE_FROM_ASSET`: built from a Stellar `Asset`
- structure.
+- `CONTRACT_ID_PREIMAGE_FROM_ASSET`: built from a Stellar `Asset` structure.
### Uploading Wasm sources using `InvokeHostFunctionOp`
@@ -97,17 +98,15 @@ contract via `InvokeHostFunctionOp`(defined in [CAP-0046-04](./cap-0046-04.md))
with `HOST_FUNCTION_TYPE_UPLOAD_CONTRACT_WASM` host function type in
`hostFunction`.
-This function accepts `opaque wasm<>` that contains the Wasm
-contract code.
+This function accepts `opaque wasm<>` that contains the Wasm contract code.
Uploaded contracts are stored in `ContractCodeEntry` ledger entries. These
-entries are keyed by the hash of the Wasm used to upload
-them.
+entries are keyed by the hash of the Wasm used to upload them.
-The contract upload host function will compute the hash of
-the Wasm and check if such a contract code already exists. If
-the entry exists, the operation will immediately succeed. If it doesn't, the
-new `ContractCodeEntry` will be created.
+The contract upload host function will compute the hash of the Wasm and check
+if such a contract code already exists. If the entry exists, the operation will
+immediately succeed. If it doesn't, the new `ContractCodeEntry` will be
+created.
Core does not perform any validation on the uploaded contract code, besides
checking its size.
@@ -115,12 +114,12 @@ checking its size.
#### Max contract size setting
The maximum Wasm contract size will be introduced as a `ConfigSettingEntry`(see
-[CAP-0046-09](./cap-0046-09.md) for details on config entries).
+[CAP-0046-09](./cap-0046-09.md) for details on config entries).
It is set during the protocol version upgrade using a new `ConfigSettingEntry`,
with `configSettingID` == `CONFIG_SETTING_CONTRACT_MAX_SIZE_BYTES`, and
-`contractMaxSizeBytes` == `65536`. The valid values for
-`contractMaxSizeBytes` are [5000, UINT32_MAX] (inclusive).
+`contractMaxSizeBytes` == `65536`. The valid values for `contractMaxSizeBytes`
+are [5000, UINT32_MAX] (inclusive).
### Instantiating contracts using `InvokeHostFunctionOp`
@@ -155,14 +154,16 @@ If the identifier is new, the host will create a new Persistent
`ContractDataEntry` from [CAP-0046-05](./CAP-0046-05.md) with a
`SCV_LEDGER_KEY_CONTRACT_INSTANCE` key value. The value of the entry is
`ScContractInstance` that either refers to the Wasm code entry or to a built-in
-contract (according to the value of the `executable` field in `CreateContractArgs`).
+contract (according to the value of the `executable` field in
+`CreateContractArgs`).
### Instantiating a contract from a contract
Factory contracts are quite popular already on other networks, so this CAP adds
-functionality to support them.
+functionality to support them.
-The following host functions are provided to instantiate contracts and upload Wasm:
+The following host functions are provided to instantiate contracts and upload
+Wasm:
```rust
// Uploads the Wasm. Returns the SHA-256 hash of the Wasm code.
@@ -178,8 +179,8 @@ fn create_asset_contract(serialized_asset: Bytes) -> Address
```
The contractIDs for the contracts created with `create_contract` and
-`create_asset_contract` are derived from `CONTRACT_ID_PREIMAGE_FROM_ADDRESS` and
-`CONTRACT_ID_PREIMAGE_FROM_ASSET` respectively.
+`create_asset_contract` are derived from `CONTRACT_ID_PREIMAGE_FROM_ADDRESS`
+and `CONTRACT_ID_PREIMAGE_FROM_ASSET` respectively.
Similar to how contract creation through `HOST_FUNCTION_TYPE_CREATE_CONTRACT`
requires authorization mentioned
@@ -200,45 +201,58 @@ fn update_current_contract_wasm(wasm_hash: Bytes)
```
### Invoking a contract using `InvokeHostFunctionOp`
+
Contracts can be invoked via `InvokeHostFunctionOp` with
`HOST_FUNCTION_TYPE_INVOKE_CONTRACT` host function type.
-The function accepts `InvokeContractArgs` struct that consists of the `contractAddress`, `functionName` and the `args` array.
+The function accepts `InvokeContractArgs` struct that consists of the
+`contractAddress`, `functionName` and the `args` array.
-The `auth` vector must be properly filled with the required credentials and the correct invocation details. Refer to [Soroban Authorization Framework](./cap-0046-11.md) for details.
+The `auth` vector must be properly filled with the required credentials and the
+correct invocation details. Refer to
+[Soroban Authorization Framework](./cap-0046-11.md) for details.
-If the invocation is successful, the return value `ScVal` along with `ContractEvent`s will be
-used to construct a `InvokeHostFunctionSuccessPreImage`, from which the hash is computed and
-returned in the `InvokeHostFunctionResult`.
+If the invocation is successful, the return value `ScVal` along with
+`ContractEvent`s will be used to construct a
+`InvokeHostFunctionSuccessPreImage`, from which the hash is computed and
+returned in the `InvokeHostFunctionResult`.
-If the invocation fails, the `InvokeHostFunctionResult` will contain the proper error code
-indicating the failure reason.
+If the invocation fails, the `InvokeHostFunctionResult` will contain the proper
+error code indicating the failure reason.
-The return value as well as all events (`ContractEvent`s and `DiagnosticEvent`s) will be included in the `TransactionMeta`, see [cap-0046-08](./cap-0046-08.md#specification) for details.
+The return value as well as all events (`ContractEvent`s and
+`DiagnosticEvent`s) will be included in the `TransactionMeta`, see
+[cap-0046-08](./cap-0046-08.md#specification) for details.
### Invoking a contract from a contract
+
[CAP-0046-03](./CAP-0046-03.md#call-host-functions-mod-d) specifies the host
functions that can be used for cross-contract invocations.
-The `auth` vector must be properly filled with the required credentials and the correct invocation details. Refer to [Soroban Authorization Framework](./cap-0046-11.md) for details.
+The `auth` vector must be properly filled with the required credentials and the
+correct invocation details. Refer to
+[Soroban Authorization Framework](./cap-0046-11.md) for details.
### Extend and restore contract
-Both the contract instance and the contract code entry are persistent ledger entries which have finite, pre-specified TTLs.
+Both the contract instance and the contract code entry are persistent ledger
+entries which have finite, pre-specified TTLs.
-We provide two additional operations `ExtendFootprintTTLOp` and `RestoreFootprintOp` for extending TTL and restoring entries.
+We provide two additional operations `ExtendFootprintTTLOp` and
+`RestoreFootprintOp` for extending TTL and restoring entries.
-See [State Archival Interface Cap](./cap-0046-12.md) for more details on archival semantics and operations for extending and restoring entries.
+See [State Archival Interface Cap](./cap-0046-12.md) for more details on
+archival semantics and operations for extending and restoring entries.
## Design Rationale
### There are no built in controls for contracts
-Controls like pausing invocation or mutability for all or a subset of a contract
-should be put into a contract itself. Leaving it to the contract writer is a
-much more general solution than baking it into the protocol. The downside is
-this is more error prone and will take more space since the same logic will be
-implemented multiple times.
+Controls like pausing invocation or mutability for all or a subset of a
+contract should be put into a contract itself. Leaving it to the contract
+writer is a much more general solution than baking it into the protocol. The
+downside is this is more error prone and will take more space since the same
+logic will be implemented multiple times.
### `ContractDataEntry` has no owner associated with it
@@ -249,10 +263,10 @@ the contract creator chooses.
### `ContractCodeEntry` has no owner associated with it
-Contract source code entries with the Wasm code don't have any ownership. Anyone
-can upload contract sources to the ledger and then anyone can use them. This
-encourages sharing the contract code and allows contracts that use it to be
-sure that their implementation can't unexpectedly change.
+Contract source code entries with the Wasm code don't have any ownership.
+Anyone can upload contract sources to the ledger and then anyone can use them.
+This encourages sharing the contract code and allows contracts that use it to
+be sure that their implementation can't unexpectedly change.
### Contracts cannot be deleted, and can only be updated through the update_current_contract_wasm host function
@@ -275,11 +289,11 @@ on the demand and network load requirements.
### ContractIDs are deterministic
-Pulling contractIDs from `LedgerHeader.idPool` would be easier but it would make
-parallelizing contract creation more difficult in the future. It's also more
-difficult to determine what the contractID will be since the id pool would be
-used by offers and other contracts. This CAP uses a `Hash` instead as the
-contractID.
+Pulling contractIDs from `LedgerHeader.idPool` would be easier but it would
+make parallelizing contract creation more difficult in the future. It's also
+more difficult to determine what the contractID will be since the id pool would
+be used by offers and other contracts. This CAP uses a `Hash` instead as the
+contractID.
With this CAP we provide several ways of building the contractID preimages that
can be reproduced off-chain and then used to address the contracts that may or
diff --git a/core/cap-0046-03.md b/core/cap-0046-03.md
index 0eaef016f..9fc864bc5 100644
--- a/core/cap-0046-03.md
+++ b/core/cap-0046-03.md
@@ -14,40 +14,71 @@ Protocol version: 20
```
## Simple Summary
-This CAP proposes a set of host functions — interface between the host environment running on the Stellar Core and the WebAssembly-based (Wasm) virtual machine running smart contracts.
+
+This CAP proposes a set of host functions — interface between the host
+environment running on the Stellar Core and the WebAssembly-based (Wasm)
+virtual machine running smart contracts.
## Motivation amd Goals Alignment
See the Soroban overview CAP.
## Abstract
-This CAP specifies the signatures of host functions that serve as the host-VM interface, divided into logical modules. The selection criteria of the host functions and the framework of resource accounting are detailed in the Design Rationale.
+
+This CAP specifies the signatures of host functions that serve as the host-VM
+interface, divided into logical modules. The selection criteria of the host
+functions and the framework of resource accounting are detailed in the Design
+Rationale.
## Specification
-The entire suite of host functions are broken down into logical modules, each evolving around a specific area of functionality (e.g. map, vector, integer).
-The host functions, which define the interface between the host environment and the virtual machine (VM), are specified in [WebAssembly text format](https://developer.mozilla.org/en-US/docs/WebAssembly/Understanding_the_text_format) to preserve generality, since implementation of the host functions are supposed to be language agnostic.
+The entire suite of host functions are broken down into logical modules, each
+revolving around a specific area of functionality (e.g. map, vector, integer).
-There are a few properties and conventions that apply generally to all host functions, they are outlined below to avoid repeating on every function.
+The host functions, which define the interface between the host environment and
+the virtual machine (VM), are specified in
+[WebAssembly text format](https://developer.mozilla.org/en-US/docs/WebAssembly/Understanding_the_text_format)
+to preserve generality, since implementations of the host functions are supposed
+to be language agnostic.
+
+There are a few properties and conventions that apply generally to all host
+functions, they are outlined below to avoid repeating on every function.
#### Error and trap
-Execution of the host function should never cause an exception in the host environment. If the execution fails for any reason, the host will emit a trap to the VM to stop the execution.
-There can be an variety of reasons causing a host function execution to fail, see [error handing](#error-handling).
-In general error propagation is not specified as part of the host interface specification. The only exception is the `try_call` function (inside module `d`) function, which may the error code on failure if the error is recoverable.
+Execution of the host function should never cause an exception in the host
+environment. If the execution fails for any reason, the host will emit a trap
+to the VM to stop the execution. There can be a variety of reasons causing a
+host function execution to fail, see [error handling](#error-handling).
+
+In general error propagation is not specified as part of the host interface
+specification. The only exception is the `try_call` function (inside module
+`d`), which may return the error code on failure if the error is recoverable.
-The error conditions on a host function should be self-explainatory and/or clearly documented.
+The error conditions on a host function should be self-explanatory and/or
+clearly documented.
#### Parameter types and nomenclature
-All parameters (input arguments and return value) are 64-bit integers, and they either represent a primitive integer value or a host value type specified in [CAP-0046-01](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md#host-value-type).
-For clarity, the input parameters are named as "name underscore type" in "snake" case. For example `v: VecObject` in Rust definition is translated to `param $v_vec_object i64`.
+All parameters (input arguments and return value) are 64-bit integers, and they
+either represent a primitive integer value or a host value type specified in
+[CAP-0046-01](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md#host-value-type).
+
+For clarity, the input parameters are named as "name underscore type" in
+"snake" case. For example `v: VecObject` in Rust definition is translated to
+`param $v_vec_object i64`.
#### Immutability
-All host functions respect the immutability constraint on the host objects (see [CAP-0046-01](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md#immutability)). Any function that mutates a host object (e.g. `vec_push`) will create a new host object and return its handle.
-With that, we now present the host functions.
+All host functions respect the immutability constraint on the host objects (see
+[CAP-0046-01](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md#immutability)).
+Any function that mutates a host object (e.g. `vec_push`) will create a new
+host object and return its handle.
+
+With that, we now present the host functions.
+
### "Context" host functions (mod `x`)
+
```
;; Emit a diagnostic event containing a message and sequence of `Val`s.
(func $log_from_linear_memory (param $msg_pos_u32_val i64) (param $msg_len_u32_val i64) (param $vals_pos_u32_val i64) (param $vals_len_u32_val i64) (result i64))
@@ -81,6 +112,7 @@ With that, we now present the host functions.
```
### "Integer" host functions (mod `i`)
+
```
;; Convert a `u64` to an object containing a `u64`.
(func $obj_from_u64 (param $v u64) (result i64))
@@ -217,6 +249,7 @@ With that, we now present the host functions.
```
### "Map" host functions (mod `m`)
+
```
;; Create an empty new map.
(func $map_new (result i64))
@@ -256,6 +289,7 @@ With that, we now present the host functions.
```
### "Vec" host functions (mod `v`)
+
```
;; Creates an empty new vector.
(func $vec_new () (result i64))
@@ -317,6 +351,7 @@ With that, we now present the host functions.
```
### "Ledger" host functions (mod `l`)
+
```
(func $put_contract_data (param $k_val i64) (param $v_val i64) (param $t_storage_type i64) (result i64))
@@ -356,6 +391,7 @@ With that, we now present the host functions.
```
### "Call" host functions (mod `d`)
+
```
;; Calls a function in another contract with arguments contained in vector `args`. If the call is successful, returns the result of the called function. Traps otherwise.
(func $call (param $contract_address_object i64) (param $func_symbol i64) (param $args_vec_object i64) (result i64))
@@ -366,6 +402,7 @@ With that, we now present the host functions.
```
### "Buf" host functions (mod `b`)
+
```
;; Serializes an (SC)Val into XDR opaque `Bytes` object.
(func $serialize_to_bytes (param $v_val i64) (result i64))
@@ -441,6 +478,7 @@ With that, we now present the host functions.
```
### "Crypto" host functions (mod `c`)
+
```
;;
(func $compute_hash_sha256 (param $x_bytes_object i64) (result i64))
@@ -456,6 +494,7 @@ With that, we now present the host functions.
```
### "Address" host functions (mod `a`)
+
```
;; Checks if the address has authorized the invocation of the current contract function with the provided arguments. Traps if the invocation hasn't been authorized.
(func $require_auth_for_args (param $address_address_object i64) (param $args_vec_object i64) (result i64))
@@ -474,12 +513,14 @@ With that, we now present the host functions.
```
### "Test" host functions (mod `t`)
+
```
;; A dummy function taking 0 arguments and performs no-op. This function is for test purpose only, for measuring the roundtrip cost of invoking a host function, i.e. host->Vm->host.
(func $dummy0 (result i64))
```
### "prng" host functions (mod `p`)
+
```
;; Reseed the frame-local PRNG with a given BytesObject, which should be 32 bytes long.
(func $prng_reseed (param $seed_bytes_object i64) (result i64))
@@ -495,39 +536,100 @@ With that, we now present the host functions.
```
### XDR changes
-See [CAP-0046-01](./cap-0046-01.md#xdr-changes) for detail definition of all the host object types and semantics of their operations.
+
+See [CAP-0046-01](./cap-0046-01.md#xdr-changes) for detail definition of all
+the host object types and semantics of their operations.
## Design Rationale
-The Wasm smart-contract system for the Stellar network is divided into the host context and the guest context, and the host functions define the interface between the host environment (running the host context) and the VM (running the guest code) via which guest code can interact with the compute resources and host objects. For the full definitions of the host and guest context, host environment, virtual machine, please refer to the “Components” section in [CAP-0046-01](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md#components).
-The guest-host split allows common smart contract operations and computational heavy-lifting to be off-loaded to the host side. This reduces guest code size and results in a variety of benefits outlined in CAP-0046. However on the flip side, this potentially increases the attack surface and maintenance burden for the host function developers. Therefore it is vital to be judicious on host function selection, and it needs to be based on a clear set of criteria. The criteria we choose for host functions in this phase of the project are:
-- **Relevance**: the functions need to be relevant to a wide spectrum of smart contract applications. In particular, we would like to prioritize the expensive primitives that are common among smart contract operations.
-- **Determinism**: produces designed outcome deterministically across all relavent platforms.
-- **Efficiency**: must run within a reasonably limited resource restriction. Any smart contracts that run out of its resource limits will result in a trap.
-- **Maintainability**: must be reasonably straightforward to implement and easy to maintain. Maintainability requirement also extends to the third-party library we choose for the implementation of a particular host function.
+
+The Wasm smart-contract system for the Stellar network is divided into the host
+context and the guest context, and the host functions define the interface
+between the host environment (running the host context) and the VM (running the
+guest code) via which guest code can interact with the compute resources and
+host objects. For the full definitions of the host and guest context, host
+environment, virtual machine, please refer to the “Components” section in
+[CAP-0046-01](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md#components).
+The guest-host split allows common smart contract operations and computational
+heavy-lifting to be off-loaded to the host side. This reduces guest code size
+and results in a variety of benefits outlined in CAP-0046. However on the flip
+side, this potentially increases the attack surface and maintenance burden for
+the host function developers. Therefore it is vital to be judicious on host
+function selection, and it needs to be based on a clear set of criteria. The
+criteria we choose for host functions in this phase of the project are:
+
+- **Relevance**: the functions need to be relevant to a wide spectrum of smart
+ contract applications. In particular, we would like to prioritize the
+ expensive primitives that are common among smart contract operations.
+- **Determinism**: produces designed outcome deterministically across all
+  relevant platforms.
+- **Efficiency**: must run within a reasonably limited resource restriction.
+ Any smart contracts that run out of its resource limits will result in a
+ trap.
+- **Maintainability**: must be reasonably straightforward to implement and easy
+ to maintain. Maintainability requirement also extends to the third-party
+ library we choose for the implementation of a particular host function.
### Additional functions/host objects may be included
-The list of host functions proposed is an initial set based on the criteria above. It is not meant to be an exhaustive list. The list of host functions will be an evolving set determined based on the requirement and needs of the stellar ecosystem.
+
+The list of host functions proposed is an initial set based on the criteria
+above. It is not meant to be an exhaustive list. The list of host functions
+will be an evolving set determined based on the requirements and needs of the
+Stellar ecosystem.
### Resource metering
-All the host functions are subject to resource metering specified in [cap-0046-01](./cap-0046-01.md). Cpu and memory consumptions are tracked by metering during the host function execution, and exceeding the resource limit will result in an `SCEC_EXCEEDED_LIMIT` error.
+
+All the host functions are subject to resource metering specified in
+[cap-0046-01](./cap-0046-01.md). CPU and memory consumption are tracked by
+metering during the host function execution, and exceeding the resource limit
+will result in an `SCEC_EXCEEDED_LIMIT` error.
### Error handling
-All host functions (with the exception of `try_call`) are infallible. An error generated during the host function execution will immediately result in a trap to the guest VM. The alternative approach of making all host functions fallible, i.e. including a success-or-failure signal in the returned 64-bit value. There are a few reasons favoring the infallible interface:
-1. Minimizes the amount of redundant error-handling guest code even on the non-error path (which is most of the time), thus reducing code size and resource cost.
-2. Trapping by default ensures errors are not hidden or forgotten, therefore makes for a safer design.
-3. Including the success-or-failure signal in the return value requires additional implementation complexity on the host, which is paid for by every contract on every host function call.
-4. No easy way to disambiguate "fail with status" vs "ok with status". See explaination [below](#try_call).
-The host function reportoire should be clear on the failure conditions, and should contain enough
-building blocks to help the guest preemptively decide if a failure condition will be triggered before making the call. For example, the `vec_get` function will trap if the index argument is greater than the length of the vector, but a contract developer can use the `vec_len` function to check if this would occur before calling `vec_get`.
+All host functions (with the exception of `try_call`) are infallible. An error
+generated during the host function execution will immediately result in a trap
+to the guest VM. An alternative approach would be to make all host functions
+fallible, i.e. to include a success-or-failure signal in the returned 64-bit
+value. There are a few reasons favoring the infallible interface:
+
+1. Minimizes the amount of redundant error-handling guest code even on the
+ non-error path (which is most of the time), thus reducing code size and
+ resource cost.
+2. Trapping by default ensures errors are not hidden or forgotten, therefore
+ makes for a safer design.
+3. Including the success-or-failure signal in the return value requires
+ additional implementation complexity on the host, which is paid for by every
+ contract on every host function call.
+4. No easy way to disambiguate "fail with status" vs "ok with status". See
+   explanation [below](#try_call).
+
+The host function repertoire should be clear on the failure conditions, and
+should contain enough building blocks to help the guest preemptively decide if
+a failure condition will be triggered before making the call. For example, the
+`vec_get` function will trap if the index argument is greater than the length
+of the vector, but a contract developer can use the `vec_len` function to check
+if this would occur before calling `vec_get`.
#### `try_call`
-The only fallible host function is `try_call`, which will return the error code as the result (instead of trapping) on failure.
-One downside of allowing error code as return value is the ambiguity of "fail with status" and "ok with status". If a contract function returns an `ScError` as its ok return value, there is no other mechanism deciding if the error is the ok value or the failure status.
+The only fallible host function is `try_call`, which will return the error code
+as the result (instead of trapping) on failure.
+
+One downside of allowing error code as return value is the ambiguity of "fail
+with status" and "ok with status". If a contract function returns an `ScError`
+as its ok return value, there is no other mechanism deciding if the error is
+the ok value or the failure status.
## Security Concerns
-The security concerns are the same as what have been outlined in CAP-0046. By expanding the host object repertoire and introducing host functions that create and operate on host objects on-the-fly during runtime, we’ve expanded the surface where those concerns manifest. In particular, this CAP aims to address “the risk of mis-metering of guest-controlled resources and denial of service”, by detailing the exact metrics of resource accounting on both the guest and the host side.
+
+The security concerns are the same as what has been outlined in CAP-0046. By
+expanding the host object repertoire and introducing host functions that create
+and operate on host objects on-the-fly during runtime, we’ve expanded the
+surface where those concerns manifest. In particular, this CAP aims to address
+“the risk of mis-metering of guest-controlled resources and denial of service”,
+by detailing the exact metrics of resource accounting on both the guest and the
+host side.
## Implementation
-Host functions have been implemented in [rs-soroban-env](https://github.com/stellar/rs-soroban-env).
+
+Host functions have been implemented in
+[rs-soroban-env](https://github.com/stellar/rs-soroban-env).
diff --git a/core/cap-0046-05.md b/core/cap-0046-05.md
index 55763482c..49836c17e 100644
--- a/core/cap-0046-05.md
+++ b/core/cap-0046-05.md
@@ -15,117 +15,189 @@ Protocol version: 20
## Simple Summary
-This CAP defines a ledger entry types for storing data records for smart contracts, as well as host functions for interacting with it and some discussion of its interaction with contract invocation and execution.
+This CAP defines a ledger entry type for storing data records for smart
+contracts, as well as host functions for interacting with it and some
+discussion of its interaction with contract invocation and execution.
## Working Group
-This protocol change was authored by Graydon Hoare, with input from the consulted individuals mentioned at the top of this document.
+This protocol change was authored by Graydon Hoare, with input from the
+consulted individuals mentioned at the top of this document.
## Motivation
-Most nontrivial smart contracts have persistent state. Earlier smart contract CAPs left this topic undefined, this CAP attempts to fill in the gap.
+Most nontrivial smart contracts have persistent state. Earlier smart contract
+CAPs left this topic undefined; this CAP attempts to fill in the gap.
### Goals Alignment
-Same goals alignment as CAP-46. This CAP is essentially "a continuation of the work initiated in CAP-46".
+Same goals alignment as CAP-46. This CAP is essentially "a continuation of the
+work initiated in CAP-46".
## Requirements
- - Smart contracts must be able to store state in the ledger that persists between transactions.
- - Contracts should be given as much flexibility as possible in how they organize their data.
- - As much as possible, multiple contracts should be able to execute in parallel.
- - Parallel execution must maintain a strong consistency model: strict serializability.
- - The performance impact of user-initiated IO should be strictly limited, as IO can be very costly.
- - The granularity of IO should balance the desirability of amortizing fixed per-IO costs with the undesirability of IO on redundant data.
- - The ledger space consumed by contract data should be attenuated when possible, especially transient and dormant data.
-
-Additionally, several considerations that applied to the data model of CAP-0046-01 apply here, especially around interoperability and simplicity:
-
- - At least some data should be readable passively without running contract code.
- - Data should be at least somewhat robust to version changes in the contract code accessing it.
+- Smart contracts must be able to store state in the ledger that persists
+ between transactions.
+- Contracts should be given as much flexibility as possible in how they
+ organize their data.
+- As much as possible, multiple contracts should be able to execute in
+ parallel.
+- Parallel execution must maintain a strong consistency model: strict
+ serializability.
+- The performance impact of user-initiated IO should be strictly limited, as IO
+ can be very costly.
+- The granularity of IO should balance the desirability of amortizing fixed
+ per-IO costs with the undesirability of IO on redundant data.
+- The ledger space consumed by contract data should be attenuated when
+ possible, especially transient and dormant data.
+
+Additionally, several considerations that applied to the data model of
+CAP-0046-01 apply here, especially around interoperability and simplicity:
+
+- At least some data should be readable passively without running contract
+ code.
+- Data should be at least somewhat robust to version changes in the contract
+ code accessing it.
## Abstract
-A new ledger entry type is added that stores key-value pairs, where both key and val are of type `SCVal` (defined in [CAP-0046-01](./cap-0046-01.md)).
+A new ledger entry type is added that stores key-value pairs, where both key
+and val are of type `SCVal` (defined in [CAP-0046-01](./cap-0046-01.md)).
-An additional small key-value map called **instance storage** is also available inside each contract instance ledger entry (defined in [CAP-0046-02](./cap-0046-02.md)).
+An additional small key-value map called **instance storage** is also available
+inside each contract instance ledger entry (defined in
+[CAP-0046-02](./cap-0046-02.md)).
-New host functions are added to query and modify these key-value pairs from within a smart contract.
+New host functions are added to query and modify these key-value pairs from
+within a smart contract.
## Specification
-Readers should be familiar with the content of CAP-0046-01 and CAP-0046-02 at least, this CAP uses their definitions.
+Readers should be familiar with the content of CAP-0046-01 and CAP-0046-02 at
+least; this CAP uses their definitions.
### Ledger entry and key
-This CAP adds an entry type code `LedgerEntryType.CONTRACT_DATA`, an entry struct `ContractDataEntry`, and a variant of the `LedgerEntry` and `LedgerKey` unions to store the ledger entry and its key material, respectively, under the `CONTRACT_DATA` type code.
+This CAP adds an entry type code `LedgerEntryType.CONTRACT_DATA`, an entry
+struct `ContractDataEntry`, and a variant of the `LedgerEntry` and `LedgerKey`
+unions to store the ledger entry and its key material, respectively, under the
+`CONTRACT_DATA` type code.
The `LedgerKey` of a `CONTRACT_DATA` ledger entry is composed of:
- - A `ContractID` field (as defined in CAP-0046-02).
- - A `ContractDataDurability` field (as defined in CAP-0046-TBD data expiry).
- - A `key` field, an `SCVal` chosen by the contract.
-Each `CONTRACT_DATA` ledger entry has a unique `LedgerKey`, but multiple ledger entries may have the same `key` field within that `LedgerKey`, so long as the other earlier fields differ. In other words, there is a separate "key-space" for key-value mappings within each `ContractID` and `ContractDataDurability` level.
+- A `ContractID` field (as defined in CAP-0046-02).
+- A `ContractDataDurability` field (as defined in CAP-0046-TBD data expiry).
+- A `key` field, an `SCVal` chosen by the contract.
-The `LedgerKey` implied by an access to a key-value pair held in instance storage is different from that of a key-value pair in its own ledger entry. See the section on instance storage.
+Each `CONTRACT_DATA` ledger entry has a unique `LedgerKey`, but multiple ledger
+entries may have the same `key` field within that `LedgerKey`, so long as the
+other earlier fields differ. In other words, there is a separate "key-space"
+for key-value mappings within each `ContractID` and `ContractDataDurability`
+level.
+
+The `LedgerKey` implied by an access to a key-value pair held in instance
+storage is different from that of a key-value pair in its own ledger entry. See
+the section on instance storage.
### Instance storage
-This CAP adds (or rather, describes access to) an `SCMap` called `storage` stored inside each `SCContractInstance`. This **instance storage** map contains data that is closely coupled to the lifecycle and read-access pattern of the contract instance itself. See [CAP-0046-02](./cap-0046-02.md) for details on contract instances.
+This CAP adds (or rather, describes access to) an `SCMap` called `storage`
+stored inside each `SCContractInstance`. This **instance storage** map contains
+data that is closely coupled to the lifecycle and read-access pattern of the
+contract instance itself. See [CAP-0046-02](./cap-0046-02.md) for details on
+contract instances.
-A contract accesses instance storage by `key`, but this key is _not_ used to form a `LedgerKey` to access a ledger entry in the storage system. Rather, the entire `SCContractInstance` (including its entire `SCMap storage`) is accessed as a single `CONTRACT_DATA` ledger entry with its `LedgerKey` keyed by a special `SCVal` reserved just for this purpose: `SCV_LEDGER_KEY_CONTRACT_INSTANCE`.
+A contract accesses instance storage by `key`, but this key is _not_ used to
+form a `LedgerKey` to access a ledger entry in the storage system. Rather, the
+entire `SCContractInstance` (including its entire `SCMap storage`) is accessed
+as a single `CONTRACT_DATA` ledger entry with its `LedgerKey` keyed by a
+special `SCVal` reserved just for this purpose:
+`SCV_LEDGER_KEY_CONTRACT_INSTANCE`.
-In other words, all keys and values in instance storage are loaded or saved together, and the ledger entry storing them is just the instance entry itself.
+In other words, all keys and values in instance storage are loaded or saved
+together, and the ledger entry storing them is just the instance entry itself.
### Host functions
-Host functions are provided to get, put, delete, and check for the existence of a key-value pair.
+Host functions are provided to get, put, delete, and check for the existence of
+a key-value pair.
-The same host functions are used to access key-value pairs in `CONTRACT_DATA` ledger entries _or_ instance storage.
+The same host functions are used to access key-value pairs in `CONTRACT_DATA`
+ledger entries _or_ instance storage.
When accessing `CONTRACT_DATA` ledger entries:
- - The `ContractID` in the `LedgerKey` being accessed is implicitly that of the calling contract
- - The host function can _only_ access ledger entries with that `ContractID`
+
+- The `ContractID` in the `LedgerKey` being accessed is implicitly that of the
+ calling contract
+- The host function can _only_ access ledger entries with that `ContractID`
When accessing instance storage:
- - The instance storage being accessed is implicitly that of the calling contract
- - The host function can _only_ access that instance storage
+
+- The instance storage being accessed is implicitly that of the calling
+ contract
+- The host function can _only_ access that instance storage
Host functions for key-value access are also passed a **storage type**.
### StorageType
-The **storage type** provided to a given key-value access host function is, at the host interface level, a plain `u64` that encodes the Rust enum `StorageType`.
+The **storage type** provided to a given key-value access host function is, at
+the host interface level, a plain `u64` that encodes the Rust enum
+`StorageType`.
This type has 3 cases:
- - `StorageType::Temporary = 0` which directs the host function to access a `ContractData` ledger entry with `ContractDataDurability::TEMPORARY`.
- - `StorageType::Persistent = 1` which directs the host function to access a `ContractData` ledger entry with `ContractDataDurability::PERSISTENT`.
- - `StorageType::Instance = 2` which directs the host function to access contract-instance storage.
+
+- `StorageType::Temporary = 0` which directs the host function to access a
+ `ContractData` ledger entry with `ContractDataDurability::TEMPORARY`.
+- `StorageType::Persistent = 1` which directs the host function to access a
+ `ContractData` ledger entry with `ContractDataDurability::PERSISTENT`.
+- `StorageType::Instance = 2` which directs the host function to access
+ contract-instance storage.
The storage type specifies two separate dimensions of storage:
- - Whether to access a key-value mapping held in instance storage or a separate per-key ledger entry
- - The durability of the key-value mapping to access.
-These two separate dimensions are selected with a single value because instance storage is always implicitly `PERSISTENT`, as it is stored in contract instances which are `PERSISTENT`.
+- Whether to access a key-value mapping held in instance storage or a separate
+ per-key ledger entry
+- The durability of the key-value mapping to access.
+
+These two separate dimensions are selected with a single value because instance
+storage is always implicitly `PERSISTENT`, as it is stored in contract
+instances which are `PERSISTENT`.
### Restrictions
#### Point access only
-Contract data IO is restricted to so-called "point access" to specific keys. In particular there is no support for "range queries", upper or lower bounds, or any sort of iteration over the keyspace.
+
+Contract data IO is restricted to so-called "point access" to specific keys. In
+particular there is no support for "range queries", upper or lower bounds, or
+any sort of iteration over the keyspace.
#### Static footprints
-To facilitate parallel execution, contract data IO is also restricted to operate on keys that are declared in the so-called _footprint_ of each transaction. The footprint is a set of `LedgerKey`s each of which is marked as either read-only or read-write. The footprint _permits_ any read of a key within it, or a write of any key within it that is marked as read-write. All other reads and writes are not permitted.
-Note that access to _instance storage_ is described, at the level of `LedgerKey`s and therefore footprints, as an access to the `key` reserved for identifying instances: `SCV_LEDGER_KEY_CONTRACT_INSTANCE`.
+To facilitate parallel execution, contract data IO is also restricted to
+operate on keys that are declared in the so-called _footprint_ of each
+transaction. The footprint is a set of `LedgerKey`s each of which is marked as
+either read-only or read-write. The footprint _permits_ any read of a key
+within it, or a write of any key within it that is marked as read-write. All
+other reads and writes are not permitted.
-Any call to a host function to interact with a `LedgerKey` that is not permitted by the footprint will generally trap. For instance storage, trapping such an invalid access may be deferred until contract exit, when any modified instance storage map is implicitly written back to the ledger.
+Note that access to _instance storage_ is described, at the level of
+`LedgerKey`s and therefore footprints, as an access to the `key` reserved for
+identifying instances: `SCV_LEDGER_KEY_CONTRACT_INSTANCE`.
-The footprint of a transaction is static for the duration of the transaction: it is established before transaction execution begins and does not change during execution.
+Any call to a host function to interact with a `LedgerKey` that is not
+permitted by the footprint will generally trap. For instance storage, trapping
+such an invalid access may be deferred until contract exit, when any modified
+instance storage map is implicitly written back to the ledger.
+
+The footprint of a transaction is static for the duration of the transaction:
+it is established before transaction execution begins and does not change
+during execution.
### XDR changes
-See the XDR diffs in the Soroban overview CAP, specifically those covering
-new `CONTRACT_DATA` ledger entries.
+See the XDR diffs in the Soroban overview CAP, specifically those covering new
+`CONTRACT_DATA` ledger entries.
### Host function additions
@@ -158,56 +230,123 @@ new `CONTRACT_DATA` ledger entries.
### Semantics
-The semantics of each host function is described in the associated comments above.
+The semantics of each host function is described in the associated comments
+above.
+
+These semantics should be considered in the light of the strict serializability
+requirement and the understanding that all IO occurs within a transaction. In
+particular:
-These semantics should be considered in the light of the strict serializability requirement and the understanding that all IO occurs within a transaction. In particular:
- - Each write is visible immediately within the issuing transaction, but not to any other transaction, until the writing transaction commits
- - All reads and writes are observable to transactions as they would be if the transactions executed sequentially in transaction-set application order
+- Each write is visible immediately within the issuing transaction, but not to
+ any other transaction, until the writing transaction commits
+- All reads and writes are observable to transactions as they would be if the
+ transactions executed sequentially in transaction-set application order
-The durability of each write is controlled by the archival strategy described in CAP-0046-12 state archival.
+The durability of each write is controlled by the archival strategy described
+in CAP-0046-12 state archival.
## Design Rationale
### Granularity
-Granularity of data elements is a key consideration in storage. Too large and IO is wasted loading and storing redundant data; too small and the fixed space and time overheads associated with storing each data element overwhelm the system. Moreover when parallel execution is included in consideration, the storage granularity becomes the unit of contention, with two contracts constrained to execute serially (or with some mechanism to enforce serializable consistency) when they share access to a single data element and at least one of them performs a write.
-Keying contract data by arbitrary `SCVal` values allows users to choose the granularity of data entering and leaving IO functions: fine-grained data may be stored under very large and specific keys, or coarser-grained data may be stored under smaller prefixes or "group" keys, with inner data structures such as vectors or maps combining together groups of data values. This is an intentional decision to allow contract authors to experiment and find the right balance, rather than deciding a priori on a granularity.
+Granularity of data elements is a key consideration in storage. Too large and
+IO is wasted loading and storing redundant data; too small and the fixed space
+and time overheads associated with storing each data element overwhelm the
+system. Moreover when parallel execution is included in consideration, the
+storage granularity becomes the unit of contention, with two contracts
+constrained to execute serially (or with some mechanism to enforce serializable
+consistency) when they share access to a single data element and at least one
+of them performs a write.
+
+Keying contract data by arbitrary `SCVal` values allows users to choose the
+granularity of data entering and leaving IO functions: fine-grained data may be
+stored under very large and specific keys, or coarser-grained data may be
+stored under smaller prefixes or "group" keys, with inner data structures such
+as vectors or maps combining together groups of data values. This is an
+intentional decision to allow contract authors to experiment and find the right
+balance, rather than deciding a priori on a granularity.
### Instance storage
-Some data (such as authentication and configuration data) is both small and has a strong implicit connection to a contract instance: it should have the same lifecycle as the instance, and will commonly be accessed (read-only) on every call to the contract. For this sort of data, storage in a separate ledger entry can introduce unwanted performance overhead and, more importantly, potential failure modes (for example when the data expires but the contract instance does not). To simplify such cases and improve performance, instance storage was added.
+Some data (such as authentication and configuration data) is both small and has
+a strong implicit connection to a contract instance: it should have the same
+lifecycle as the instance, and will commonly be accessed (read-only) on every
+call to the contract. For this sort of data, storage in a separate ledger entry
+can introduce unwanted performance overhead and, more importantly, potential
+failure modes (for example when the data expires but the contract instance does
+not). To simplify such cases and improve performance, instance storage was
+added.
-Note that instance storage should _not_ be used for unbounded key-value data or for data that will be frequently written during execution, as this will introduce artificial contention on the instance storage and defeat concurrent execution.
+Note that instance storage should _not_ be used for unbounded key-value data or
+for data that will be frequently written during execution, as this will
+introduce artificial contention on the instance storage and defeat concurrent
+execution.
### Static footprint
-The requirement that each transaction have a static footprint serves both to limit arbitrary user-initiated IO mid-transaction (i.e. to enable efficient bulk IO only at transaction endpoints) as well as to enable static scheduling of parallel execution.
-
-This limits transactions to those which _have_ static footprints, which at first glance may seem overly restrictive. To make it work in practice, contracts with dynamic footprints need to be run twice, once "offline" (or out of the main stellar-core processing phase, for example on a horizon server with a recent ledger snapshot) and then once again online, as part of normal stellar-core processing.
-
-The first run is executed in a special trial-run or "recording" mode that permits any reads or writes and just observes and records the footprint; but it also does not actually effect any changes to the ledger, running against a (possibly stale) read-only snapshot and discarding all writes at the end of execution. The recorded footprint is then used as the static footprint for the second run, when the transaction is submitted for real execution against the real ledger, in stellar-core. The second execution thereby validates and enforces the footprint, assuming nothing has changed between recording and enforcing. If the true footprint _has_ changed between recording and enforcing, the transaction fails the second run and the user must retry the cycle.
-This technique is taken from the ["deterministic database"](http://cs.yale.edu/homes/thomson/publications/calvin-sigmod12.pdf) and ["conflict-free concurrency control"](https://arxiv.org/abs/1810.01997) literature, where footprints are sometimes also called "read-write sets" and footprint recording is sometimes also called "reconnaissance queries".
+The requirement that each transaction have a static footprint serves both to
+limit arbitrary user-initiated IO mid-transaction (i.e. to enable efficient
+bulk IO only at transaction endpoints) as well as to enable static scheduling
+of parallel execution.
+
+This limits transactions to those which _have_ static footprints, which at
+first glance may seem overly restrictive. To make it work in practice,
+contracts with dynamic footprints need to be run twice, once "offline" (or out
+of the main stellar-core processing phase, for example on a horizon server with
+a recent ledger snapshot) and then once again online, as part of normal
+stellar-core processing.
+
+The first run is executed in a special trial-run or "recording" mode that
+permits any reads or writes and just observes and records the footprint; but it
+also does not actually effect any changes to the ledger, running against a
+(possibly stale) read-only snapshot and discarding all writes at the end of
+execution. The recorded footprint is then used as the static footprint for the
+second run, when the transaction is submitted for real execution against the
+real ledger, in stellar-core. The second execution thereby validates and
+enforces the footprint, assuming nothing has changed between recording and
+enforcing. If the true footprint _has_ changed between recording and enforcing,
+the transaction fails the second run and the user must retry the cycle.
+
+This technique is taken from the
+["deterministic database"](http://cs.yale.edu/homes/thomson/publications/calvin-sigmod12.pdf)
+and ["conflict-free concurrency control"](https://arxiv.org/abs/1810.01997)
+literature, where footprints are sometimes also called "read-write sets" and
+footprint recording is sometimes also called "reconnaissance queries".
## Protocol Upgrade Transition
### Backwards Incompatibilities
There is no backwards incompatibility consideration in this CAP.
+
### Resource Utilization
-By restricting IO to pre-declared static footprints, IO costs are fairly limited. The transaction execution lifecycle will perform bulk IO of all ledger entries in the footprint at the beginning of transaction execution, and write back those modified entries only at the end of execution. Calibrating the costs of such IO and reflecting it in fees charged for use remains an open problem to address. This CAP expects to build on the cost model that CAP-46 and CAP-51 will eventually provide.
+By restricting IO to pre-declared static footprints, IO costs are fairly
+limited. The transaction execution lifecycle will perform bulk IO of all ledger
+entries in the footprint at the beginning of transaction execution, and write
+back those modified entries only at the end of execution. Calibrating the costs
+of such IO and reflecting it in fees charged for use remains an open problem to
+address. This CAP expects to build on the cost model that CAP-46 and CAP-51
+will eventually provide.
## Security Concerns
-The main security risk is unauthorized data-writing, as all data on the blockchain is publicly readable in any case.
-The authorization model for writes is narrow and easy to understand: contracts are restricted to only being able to write to data with their contract ID. Further authorization checks are delegated to contracts themselves to manage.
+The main security risk is unauthorized data-writing, as all data on the
+blockchain is publicly readable in any case.
+
+The authorization model for writes is narrow and easy to understand: contracts
+are restricted to only being able to write to data with their contract ID.
+Further authorization checks are delegated to contracts themselves to manage.
## Test Cases
+
TBD.
## Implementation
There is are two work-in-progress branches associated with this CAP:
- - [stellar-core PR 3439](https://github.com/stellar/stellar-core/pull/3439) including XDR and C++ changes to stellar-core
- - [stellar-contract-env PR 83](https://github.com/stellar/rs-stellar-contract-env/pull/83) including Rust changes to the contract host crate
+- [stellar-core PR 3439](https://github.com/stellar/stellar-core/pull/3439)
+ including XDR and C++ changes to stellar-core
+- [stellar-contract-env PR 83](https://github.com/stellar/rs-stellar-contract-env/pull/83)
+ including Rust changes to the contract host crate
diff --git a/core/cap-0046-06.md b/core/cap-0046-06.md
index 36596b7b9..676f3b7ac 100644
--- a/core/cap-0046-06.md
+++ b/core/cap-0046-06.md
@@ -24,8 +24,8 @@ blockchain ecosystems have very little innovation in the space of fungible
assets, with developers often relying on open source implementations such as
OpenZeppelin.
-Rather than rely on an open source implementation, developers should have access
-to a native contract which fulfils typical needs. This does not prevent
+Rather than rely on an open source implementation, developers should have
+access to a native contract which fulfils typical needs. This does not prevent
developers from implementing their own fungible asset if the contract does not
meet their needs. But the efficiency gained from a native implementation should
reduce fees sufficiently to encourage most developers to choose the native
@@ -59,8 +59,8 @@ The interface tries to follow an ERC-20 model.
### XDR Changes
-See the XDR diffs in the Soroban overview CAP, specifically those covering
-new envelope types.
+See the XDR diffs in the Soroban overview CAP, specifically those covering new
+envelope types.
### Semantics: Data Format
@@ -104,7 +104,6 @@ pub enum InstanceDataKey {
### Semantics: Initialization
-
```rust
/******************************************************************************\
*
@@ -345,22 +344,24 @@ fn clawback(env: Env, from: Address, amount: i128);
#### Deploying a contract that allows interacting with Stellar classic assets
-The Stellar Asset Contract can be deployed by using `InvokeHostFunctionOp`, with
-a `HostFunction` of type `HOST_FUNCTION_TYPE_CREATE_CONTRACT`. The
+The Stellar Asset Contract can be deployed by using `InvokeHostFunctionOp`,
+with a `HostFunction` of type `HOST_FUNCTION_TYPE_CREATE_CONTRACT`. The
`CreateContractArgs` under that will contain a `ContractExecutable` of type
`CONTRACT_EXECUTABLE_TOKEN`, and a `ContractIDPreimage` of type
-`CONTRACT_ID_PREIMAGE_FROM_ASSET` which contains the `Asset` being
-deployed.
+`CONTRACT_ID_PREIMAGE_FROM_ASSET` which contains the `Asset` being deployed.
In order to guarantee uniqueness of contracts that allow interacting with a
classic Stellar asset, the contractID for any specific Stellar asset is
deterministic because the `ContractIDPreimage` is of type
`CONTRACT_ID_PREIMAGE_FROM_ASSET` which contains only an `Asset`.
-The deployment and initilization of these contracts should be atomic, and the host
-accomplishes this by calling `init_asset` during deployment of the Stellar Asset Contract.
+The deployment and initialization of these contracts should be atomic, and the
+host accomplishes this by calling `init_asset` during deployment of the Stellar
+Asset Contract.
+
+Contracts can also deploy the Stellar Asset Contract by calling the
+`create_asset_contract` host function.
-Contracts can also deploy the Stellar Asset Contract by calling the `create_asset_contract` host function.
```rust
// Creates the instance of Stellar Asset Contract corresponding to the provided asset. `asset`
// is `stellar::Asset` XDR serialized to bytes format. Returns the address of the created contract.
@@ -379,28 +380,41 @@ fn init_asset(asset_bytes: Bytes) -> Result<(), Error>;
```
#### Balances
+
The Stellar Asset Contract handles balances for both `ScAddress::Account` and
-`ScAddress::Contract`. For the `Account` scenario, it uses the classic trustline
-for the account specified, so if the trustline doesn't exist, any function that
-uses it will error. For the `Contract` scenario, the contract will create a
-`ContractDataEntry` to hold the balance, along with an `authorized` and
-`clawback` flag to mimic classic trustlines and allow for the same issuer
-controls.
+`ScAddress::Contract`. For the `Account` scenario, it uses the classic
+trustline for the account specified, so if the trustline doesn't exist, any
+function that uses it will error. For the `Contract` scenario, the contract
+will create a `ContractDataEntry` to hold the balance, along with an
+`authorized` and `clawback` flag to mimic classic trustlines and allow for the
+same issuer controls.
#### `set_authorized`
-If the `set_authorized` function is called with `authorize == true` on an `Account`, it will set the accounts trustline's flag to `AUTHORIZED_FLAG` and unset `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` if the trustline currently does not have `AUTHORIZED_FLAG` set.
-If the `set_authorized` function is called with `authorize == false` on an `Account`, it will set the accounts trustline's flag to `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` and unset `AUTHORIZED_FLAG` if the trustline currently has `AUTHORIZED_FLAG` set. We set `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` instead of clearing the flags to avoid pulling all offers and pool shares that involve this trustline.
+If the `set_authorized` function is called with `authorize == true` on an
+`Account`, it will set the account's trustline's flag to `AUTHORIZED_FLAG` and
+unset `AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` if the trustline currently does
+not have `AUTHORIZED_FLAG` set.
+
+If the `set_authorized` function is called with `authorize == false` on an
+`Account`, it will set the account's trustline's flag to
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` and unset `AUTHORIZED_FLAG` if the
+trustline currently has `AUTHORIZED_FLAG` set. We set
+`AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG` instead of clearing the flags to
+avoid pulling all offers and pool shares that involve this trustline.
#### Authorized flag
+
Contract balances will be authorized by default unless the issuer account has
`AUTH_REQUIRED_FLAG` set.
#### Auth Revocable flag
+
Authorization cannot be revoked with `set_authorized` if the issuer does not
have `AUTH_REVOCABLE_FLAG` set.
#### Clawback
+
Account balances can only be clawed back if the trustline has
`TRUSTLINE_CLAWBACK_ENABLED_FLAG` set. When a contract balance is created, it
will be clawback enabled if the issuer account has `AUTH_CLAWBACK_ENABLED_FLAG`
@@ -412,17 +426,17 @@ choose to disable it.
### Asset Equivalence
-From the perspective of a contract, smart assets and classic assets have exactly
-identical semantics. This makes contract design and implementation easier by
-reducing the number of edge cases to consider and tests to write.
+From the perspective of a contract, smart assets and classic assets have
+exactly identical semantics. This makes contract design and implementation
+easier by reducing the number of edge cases to consider and tests to write.
### No Pre / Post Hooks
The main goal of this proposal is to create a standard asset with predictable
-behavior. This preserves the ability to deliver certain future enhancements such
-as a dedicated fee-lane for payments. If payments have arbitrary pre / post
-hooks then any arbitrarily expensive program could get embedded into the payment
-lane, significantly reducing the feasibility of such a concept.
+behavior. This preserves the ability to deliver certain future enhancements
+such as a dedicated fee-lane for payments. If payments have arbitrary pre /
+post hooks then any arbitrarily expensive program could get embedded into the
+payment lane, significantly reducing the feasibility of such a concept.
### Admin Interface
@@ -440,59 +454,59 @@ balances.
### Deployment
Unlike earlier variants of this proposal (see CAP-0048 and CAP-0049), this
-proposal does not implicitly deploy a contract for every classic asset. However,
-anyone can deploy the contract for any asset. The main advantage to this approach
-is that we no longer need to special case the identifiers for these contract.
-We still desire that these contracts are unique, so they will be constructed
-differently even though they have the same format.
+proposal does not implicitly deploy a contract for every classic asset.
+However, anyone can deploy the contract for any asset. The main advantage to
+this approach is that we no longer need to special case the identifiers for
+these contracts. We still desire that these contracts are unique, so they will
+be constructed differently even though they have the same format.
### No native contract for Soroban only assets
-We are only adding a contract to handle Classic stellar assets. For Soroban-only
-assets, you'd implement a contract that follows the same interface, but will
-need to deploy Wasm. This will allow token contracts in the ecosystem to
-materialize naturally. We still have the option to nativize popular contracts in
-the future.
+We are only adding a contract to handle Classic stellar assets. For
+Soroban-only assets, you'd implement a contract that follows the same
+interface, but will need to deploy Wasm. This will allow token contracts in the
+ecosystem to materialize naturally. We still have the option to nativize
+popular contracts in the future.
### The Token Interface accepts i128s, and Contract Balances are i128s
The options considered were u63 (contained in an ScVal), u64, u128, i128, u256,
-and an arbitrary precision big integer. Big integer is unnecessary since u256 is
-more than enough to represent any reasonable balance. This is made more obvious
-when looking at other chains and what they use (Solana uses u64, Near uses u128,
-and ERC-20 uses u256). Note that a u63 is more efficient than u64 in Soroban
-because the u63 can be contained in a 64 bit `ScVal`, while u64 will need be to
-an `ScObject`, which only exists in the host and needs to be referenced by the
-guest.
+and an arbitrary precision big integer. Big integer is unnecessary since u256
+is more than enough to represent any reasonable balance. This is made more
+obvious when looking at other chains and what they use (Solana uses u64, Near
+uses u128, and ERC-20 uses u256). Note that a u63 is more efficient than u64 in
+Soroban because the u63 can be contained in a 64 bit `ScVal`, while u64 will
+need to be an `ScObject`, which only exists in the host and needs to be
+referenced by the guest.
Now the question is what should it be instead? u63 is the most efficient with
u64 close behind, but not the most flexible if you use more than a couple
decimal places. For example, a u64 balance with 9 decimal places would have a
max value of 18,446,744,073.709551615. Stellar classic uses a signed 64 bit
integer (which would be a u63 in Soroban) with seven decimal places, leading to
-a max value of 922,337,203,685.4775807. These values might be sufficient in most
-cases, but there are some edge cases where these limits are too small. For
+a max value of 922,337,203,685.4775807. These values might be sufficient in
+most cases, but there are some edge cases where these limits are too small. For
example, the US Treasury has a cash balance of $636 billion. This barely works
in the Stellar case, but isn’t far off from reaching the max. It works in the
variable decimals case if you give up some decimals. There are other currency
and asset examples that exceed these limits as well. Instead of forcing issuers
to compromise on decimals, the best option is to use u128 balances. We chose to
use i128 instead though to allow contracts to handle negative amounts if they
-wanted to.
+wanted to.
i128 gives users more flexibilty than Stellar classic without compromising too
much in terms of performance. u256 gives us even more flexibility and easy
-compatibility with ERC-20, but i128 has a max value that should work with almost
-every use case even with 18 decimal places
+compatibility with ERC-20, but i128 has a max value that should work with
+almost every use case even with 18 decimal places
(170,141,183,460,469,231,731.687303715884105727).
### Possible Improvement: Contract Extensibility
One disadvantage of this design is the fact that token functionality must be
separated into different contracts. For example, a liquidity pool share token
-will typically have `mint` and `burn` functions which can be called by any user.
-This is not possible because there is no way to extend a contract, so the only
-functions available will be those specified above. Instead, the additional
+will typically have `mint` and `burn` functions which can be called by any
+user. This is not possible because there is no way to extend a contract, so the
+only functions available will be those specified above. Instead, the additional
functions will need to be provided in a separate contract.
## Protocol Upgrade Transition
diff --git a/core/cap-0046-07.md b/core/cap-0046-07.md
index 582e84315..d52833558 100644
--- a/core/cap-0046-07.md
+++ b/core/cap-0046-07.md
@@ -13,24 +13,34 @@ Protocol version: 20
## Simple Summary
-This CAP defines the mechanism used to determine fees when using smart contracts on the Stellar network.
+This CAP defines the mechanism used to determine fees when using smart
+contracts on the Stellar network.
## Motivation
-With the introduction of smart contracts on the network, the existing fee model of the "classic" transaction system is too simplistic: it requires careful design of the code that runs "on chain" as to ensure that all operations have a similar cost and performance profile, which is not possible with arbitrary code running in contracts.
+With the introduction of smart contracts on the network, the existing fee model
+of the "classic" transaction system is too simplistic: it requires careful
+design of the code that runs "on chain" as to ensure that all operations have a
+similar cost and performance profile, which is not possible with arbitrary code
+running in contracts.
### Goals Alignment
Goals of the updated fee model are to:
-* ensure fairness between users and use cases.
-* promote scalable patterns on the network, doing more with the same amount of overall resources.
-* ensure that the network operates in a sustainable way, network operators should be in control of their operating cost.
+
+- ensure fairness between users and use cases.
+- promote scalable patterns on the network, doing more with the same amount of
+ overall resources.
+- ensure that the network operates in a sustainable way, network operators
+ should be in control of their operating cost.
## Abstract
-This CAP proposes various network level parameters (voted on by validators), and fee structure for the different kinds of resources involved on the network.
+This CAP proposes various network level parameters (voted on by validators),
+and fee structure for the different kinds of resources involved on the network.
-The fee structure is designed to discourage "spam" traffic and overall waste of infrastructure capacity.
+The fee structure is designed to discourage "spam" traffic and overall waste of
+infrastructure capacity.
## Specification
@@ -38,7 +48,8 @@ The fee structure is designed to discourage "spam" traffic and overall waste of
See the full XDR diffs in the Soroban overview CAP.
-Fee and resource limit configuration is specified via the following network parameters (in some cases increments are used to mitigate for rounding errors):
+Fee and resource limit configuration is specified via the following network
+parameters (in some cases increments are used to mitigate for rounding errors):
```
// General “Soroban execution lane” settings
@@ -94,7 +105,7 @@ struct ConfigSettingContractLedgerCostV0
int64 bucketListTargetSizeBytes;
// Fee per 1KB write when the bucket list is empty
int64 writeFee1KBBucketListLow;
- // Fee per 1KB write when the bucket list has reached `bucketListTargetSizeBytes`
+ // Fee per 1KB write when the bucket list has reached `bucketListTargetSizeBytes`
int64 writeFee1KBBucketListHigh;
// Write fee multiplier for any additional data past the first `bucketListTargetSizeBytes`
uint32 bucketListWriteFeeGrowthFactor;
@@ -137,11 +148,11 @@ transaction:
// Resource limits for a Soroban transaction.
// The transaction will fail if it exceeds any of these limits.
struct SorobanResources
-{
+{
// The ledger footprint of the transaction.
LedgerFootprint footprint;
// The maximum number of instructions this transaction can use
- uint32 instructions;
+ uint32 instructions;
// The maximum number of bytes this transaction can read from ledger
uint32 readBytes;
@@ -163,121 +174,191 @@ struct SorobanTransactionData
#### Fee model overview
-The approach taken in this proposal is to decompose the total transaction fee into the following additive components:
-* `competitiveResourcesFee` - the fee for 'competitive' network resources (defined below) and non-refundable resources, based on the values *declared* in transaction and network-defined fee rates.
-* `refundableResourcesFee` - the maximum fee for resources that don't need to be strictly restricted per ledger and thus are charged based on the actual usage.
-* `inclusionFeeBid` - this is the "social value" part of the fee, it represents the intrinsic value that the submitter puts on that transaction.
+The approach taken in this proposal is to decompose the total transaction fee
+into the following additive components:
+
+- `competitiveResourcesFee` - the fee for 'competitive' network resources
+ (defined below) and non-refundable resources, based on the values _declared_
+ in transaction and network-defined fee rates.
+- `refundableResourcesFee` - the maximum fee for resources that don't need to
+ be strictly restricted per ledger and thus are charged based on the actual
+ usage.
+- `inclusionFeeBid` - this is the "social value" part of the fee, it represents
+ the intrinsic value that the submitter puts on that transaction.
-The 'competitive' resources are resources that have to be limited per ledger in order to ensure reasonable close time and prevent network from overloading. These resources are bounded on different dimensions, i.e. there is no single 'proxy' resource that could be used to restrict them. On a high level, these resources are:
-* instructions (virtual CPU instructions to execute)
-* ledger data access (ledger IO metrics)
-* network propagation (bandwidth usage)
+The 'competitive' resources are resources that have to be limited per ledger in
+order to ensure reasonable close time and prevent the network from overloading.
+These resources are bounded on different dimensions, i.e. there is no single
+'proxy' resource that could be used to restrict them. On a high level, these
+resources are:
-Soroban transaction fee has to cover all three components, but only `inclusionFeeBid` is used for transaction prioritization.
+- instructions (virtual CPU instructions to execute)
+- ledger data access (ledger IO metrics)
+- network propagation (bandwidth usage)
+
+Soroban transaction fee has to cover all three components, but only
+`inclusionFeeBid` is used for transaction prioritization.
#### TransactionSet semantics
-All Soroban transactions must be present in phase `1` of `GeneralizedTransactionSet` (all the remaining 'classic' transactions must be in phase `0`). The Soroban phase must contain only a single `TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` component. Refer to [`CAP-0042`](./cap-0042.md) for details on `GeneralizedTransactionSet` and phases.
+All Soroban transactions must be present in phase `1` of
+`GeneralizedTransactionSet` (all the remaining 'classic' transactions must be
+in phase `0`). The Soroban phase must contain only a single
+`TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE` component. Refer to
+[`CAP-0042`](./cap-0042.md) for details on `GeneralizedTransactionSet` and
+phases.
-While transactions bid specific `inclusionFeeBid`, the effective bid may be lowered within a transaction set component by setting `baseFee` in `txsMaybeDiscountedFee` component.
+While transactions bid specific `inclusionFeeBid`, the effective bid may be
+lowered within a transaction set component by setting `baseFee` in
+`txsMaybeDiscountedFee` component.
When set:
-* all transactions within the component must bid not less than `baseFee`, i.e. for each transaction `inclusionFeeBid >= baseFee`
-* the effective inclusion bid for transactions in that group is `baseFee`
-The total resource consumption for every one the 'competitive' resources must not exceed the ledger-wide limits. The specific limits are specified in sections below on per-resource basis.
+- all transactions within the component must bid not less than `baseFee`, i.e.
+ for each transaction `inclusionFeeBid >= baseFee`
+- the effective inclusion bid for transactions in that group is `baseFee`
+
+The total resource consumption for every one of the 'competitive' resources
+must not exceed the ledger-wide limits. The specific limits are specified in
+sections below on a per-resource basis.
-The usual `GeneralizedTransactionSet` validity and comparison rules also apply to Soroban corresponding to the semantics described in [CAP-0042](./cap-0042.md).
+The usual `GeneralizedTransactionSet` validity and comparison rules also apply
+to Soroban corresponding to the semantics described in
+[CAP-0042](./cap-0042.md).
#### Transaction validation
-All Soroban transactions must have `ext.sorobanData()` extension present and populated.
+All Soroban transactions must have `ext.sorobanData()` extension present and
+populated.
-`resources` contain the declared values of resources that the transaction is paying the fee for. These values have to not exceed the limits specified by the network settings.
+`resources` contain the declared values of resources that the transaction is
+paying the fee for. These values have to not exceed the limits specified by the
+network settings.
-`resourceFee` is computed based on the `resources` declared in `tx` and transaction envelope size:
+`resourceFee` is computed based on the `resources` declared in `tx` and
+transaction envelope size:
`resourceFee(tx) = Instructions_fee(resources.instructions) + LedgerDataAccess_fee(resources) + NetworkData_fee(size(txEnvelope)) + Historical_flat_fee(size(txEnvelope))`
-Note, that `Historical_flat_fee` is a 'competitive' resource, but it's constant for any transaction execution result and thus is a part of non-refundable fee (as its refund is always 0).
+Note, that `Historical_flat_fee` is a 'competitive' resource, but it's constant
+for any transaction execution result and thus is a part of non-refundable fee
+(as its refund is always 0).
-`resourceFee` corresponds to the sum of `competetiveResourcesFee` and `refundableResourcesFee` components.
+`resourceFee` corresponds to the sum of `competitiveResourcesFee` and
+`refundableResourcesFee` components.
-The rules for limits and fee computation per-resource are specified in dedicated sections below.
+The rules for limits and fee computation per-resource are specified in
+dedicated sections below.
-At validation time total transaction fee (`tx.fee`) has to cover the fee components based only on the values declared in transaction:
+At validation time total transaction fee (`tx.fee`) has to cover the fee
+components based only on the values declared in transaction:
`tx.fee = sorobanData.resourceFee + inclusionFeeBid`
-Minimum valid `inclusionFeeBid` value is 100 stroops, thus the following condition has to be true:
+Minimum valid `inclusionFeeBid` value is 100 stroops, thus the following
+condition has to be true:
`tx.fee >= sorobanData.resourceFee + 100`
-`sorobanData.resourceFee` value has to cover the 'competetive' resource fee computed based on the declared resource values specified in `sorobanData` and transaction envelope size:
+`sorobanData.resourceFee` value has to cover the 'competitive' resource fee
+computed based on the declared resource values specified in `sorobanData` and
+transaction envelope size:
`sorobanData.resourceFee >= resourceFee(tx)`
-The remaining value of `sorobanData.resourceFee - resourceFee(tx)` is considered to be a refundable part of the resource fee and has to cover the refundable resources consumed at apply time.
+The remaining value of `sorobanData.resourceFee - resourceFee(tx)` is
+considered to be a refundable part of the resource fee and has to cover the
+refundable resources consumed at apply time.
-Similarly to 'classic' transactions, source account must be able to pay for the total fee (`tx.fee`) for the transaction.
+Similarly to 'classic' transactions, source account must be able to pay for the
+total fee (`tx.fee`) for the transaction.
#### Fee computation while applying transactions
-As in classic, total fees are taken from the source account balance before applying transactions.
+As in classic, total fees are taken from the source account balance before
+applying transactions.
-Total fee charged is equal to `tx.fee` if `baseFee` is not set in the transaction set component, and `tx.fee - inclusionFeeBid + baseFee` if `baseFee` is set in the transaction set component.
+Total fee charged is equal to `tx.fee` if `baseFee` is not set in the
+transaction set component, and `tx.fee - inclusionFeeBid + baseFee` if
+`baseFee` is set in the transaction set component.
-During transaction execution the resource limits declared by transaction are enforced and exceeding any one of the limits leads to transaction failure with `_RESOURCE_LIMIT_EXCEEDED` operation error code (every Soroban operation defines a separate error for this, such as `INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED`).
+During transaction execution the resource limits declared by transaction are
+enforced and exceeding any one of the limits leads to transaction failure with
+`_RESOURCE_LIMIT_EXCEEDED` operation error code (every Soroban
+operation defines a separate error for this, such as
+`INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED`).
The per-resource failure conditions are specified in the sections below.
-At the end of the transaction execution, compute the final refundable fee for successful transaction as follows:
+At the end of the transaction execution, compute the final refundable fee for
+successful transaction as follows:
`effectiveRefundableFee = Events_fee(emittedContractEventsSizeBytes) + Rent_fee`
-where `emittedContractEventsSizeBytes` is the size of the emitted contract events and invocation return value, and `Rent_fee` is the fee for the rent bumps performed by the transaction (if any). If `effectiveRefundableFee > sorobanData.resourceFee - resourceFee(tx)` (i.e. if actual required refundable fee is greater than the `refundableResourcesFee` component defined above), the transaction fails.
+where `emittedContractEventsSizeBytes` is the size of the emitted contract
+events and invocation return value, and `Rent_fee` is the fee for the rent
+bumps performed by the transaction (if any). If
+`effectiveRefundableFee > sorobanData.resourceFee - resourceFee(tx)` (i.e. if
+actual required refundable fee is greater than the `refundableResourcesFee`
+component defined above), the transaction fails.
In case if transaction fails `effectiveRefundableFee` is set to `0`.
-After executing the transaction, the refund amount is computed as `sorobanData.resourceFee - resourceFee(tx) - effectiveRefundableFee`. Protocol refunds that amount (when non-zero) to the transaction source account. The ledger modification due to refund is reflected under `txChangesAfter` in the meta.
+After executing the transaction, the refund amount is computed as
+`sorobanData.resourceFee - resourceFee(tx) - effectiveRefundableFee`. Protocol
+refunds that amount (when non-zero) to the transaction source account. The
+ledger modification due to refund is reflected under `txChangesAfter` in the
+meta.
Note, that refund happens for the failed transactions as well.
#### Per-resource specifications
-This section describes the fee contributions, per-transaction/per-ledger maximum limits and apply-time enforcement for all the transaction resources.
+This section describes the fee contributions, per-transaction/per-ledger
+maximum limits and apply-time enforcement for all the transaction resources.
#### Instructions
Instructions bound the execution time of the transactions in the ledger.
A transaction contains:
-* maximum number of CPU instructions that transaction may use `sorobanData.resources.instructions`
+
+- maximum number of CPU instructions that transaction may use
+ `sorobanData.resources.instructions`
All the configuration values come from `ConfigSettingContractComputeV0`.
-Fee: `Instructions_fee(instructions) = round_up(instructions * feeRatePerInstructionsIncrement / 10000)`
+Fee:
+`Instructions_fee(instructions) = round_up(instructions * feeRatePerInstructionsIncrement / 10000)`
Validity constraints:
-* per transaction
- * `resources.instructions <= txMaxInstructions`.
-* ledger wide (`GeneralizedTransactionSet`)
- * sum of all `resources.instructions` <= `ledgerMaxInstructions`.
-Apply-time enforcement: instructions metered during the contract execution may not exceed `instructions` declared in the transaction. Refer to [CAP-0046-10](./cap-0046-10.md) for metering details.
+- per transaction
+ - `resources.instructions <= txMaxInstructions`.
+- ledger wide (`GeneralizedTransactionSet`)
+ - sum of all `resources.instructions` <= `ledgerMaxInstructions`.
+
+Apply-time enforcement: instructions metered during the contract execution may
+not exceed `instructions` declared in the transaction. Refer to
+[CAP-0046-10](./cap-0046-10.md) for metering details.
#### Ledger data
Ledger data resources bounds the amount and size of ledger reads and writes.
A transaction contains:
-* the read `sorobanData.resources.footprint.readOnly` and read/write `sorobanData.resources.readWrite` sets of ledger keys.
-* the maximum total amount of data that can be read from the ledger in bytes `sorobanData.resources.readBytes`
-* the maximum total amount of data that can be written to the ledger in bytes `sorobanData.resources.writeBytes`
+
+- the read `sorobanData.resources.footprint.readOnly` and read/write
+ `sorobanData.resources.readWrite` sets of ledger keys.
+- the maximum total amount of data that can be read from the ledger in bytes
+ `sorobanData.resources.readBytes`
+- the maximum total amount of data that can be written to the ledger in bytes
+ `sorobanData.resources.writeBytes`
All the configuration values come from `ConfigSettingContractLedgerCostV0`.
Fee:
+
```
LedgerDataAccess_fee(resources) =
(length(resources.footprint.readOnly)+length(resources.footprint.readWrite))*feeReadLedgerEntry + // cost of reading ledger entries
@@ -286,7 +367,10 @@ LedgerDataAccess_fee(resources) =
round_up(write_fee_per_1kb(BucketListSize)* resources.writeBytes / 1024) // cost of adding to the bucket list
```
-where `BucketListSize` is the average size of the bucket list over the moving window. Refer to the [State Archival CAP](cap-0046-12.md) for details, and `write_fee_per_1kb` is a function that determines the ledger write fee per 1024 bytes based on the bucket list size and is defined as follows:
+where `BucketListSize` is the average size of the bucket list over the moving
+window. Refer to the [State Archival CAP](cap-0046-12.md) for details, and
+`write_fee_per_1kb` is a function that determines the ledger write fee per 1024
+bytes based on the bucket list size and is defined as follows:
```
// this is the fee rate slope
@@ -309,51 +393,69 @@ write_fee_per_1kb(s) = max(MINIMUM_WRITE_FEE_PER_1KB,
```
Validity constraints:
-* per transaction
- * `length(resources.footprint.readOnly) + length(resources.footprint.readWrite) <= txMaxReadLedgerEntries`.
- * `resources.readBytes <= txMaxReadBytes`.
- * `length(resources.footprint.readWrite) <= txMaxWriteLedgerEntries`.
- * `resources.writeBytes <= txMaxWriteBytes`.
-* ledger wide (`GeneralizedTransactionSet`)
- * `sum(length(resources.footprint.readOnly) + length(resources.footprint.readWrite)) <= ledgerMaxReadLedgerEntries`.
- * `sum(length(resources.footprint.readWrite)) <= ledgerMaxWriteLedgerEntries`.
- * `sum(resources.readBytes) <= ledgerMaxReadBytes`.
- * `sum(resources.writeBytes) <= ledgerMaxWriteBytes`.
-
-Apply-time enforcement:
-
-* Before executing the transaction logic all the entries in the footprint (both read-only and read-write) are read from the ledger and the total read size is computed by adding the size of the key and size of the entry read (if any) to the total value. If total read size exceeds `resources.readBytes`, transaction fails.
-* During the host function execution any read/write of a ledger key outside of the footprint (or write of a read-only entry) leads immediately to a transaction failure.
-* After the execution the total size of the writes is computed by adding sizes of the keys and values of the non-removed entries. If the total write size exceeds `resources.writeBytes`, transaction fails. Entry deletion is 'free' and not counted towards the total write size.
+
+- per transaction
+ - `length(resources.footprint.readOnly) + length(resources.footprint.readWrite) <= txMaxReadLedgerEntries`.
+ - `resources.readBytes <= txMaxReadBytes`.
+ - `length(resources.footprint.readWrite) <= txMaxWriteLedgerEntries`.
+ - `resources.writeBytes <= txMaxWriteBytes`.
+- ledger wide (`GeneralizedTransactionSet`)
+ - `sum(length(resources.footprint.readOnly) + length(resources.footprint.readWrite)) <= ledgerMaxReadLedgerEntries`.
+ - `sum(length(resources.footprint.readWrite)) <= ledgerMaxWriteLedgerEntries`.
+ - `sum(resources.readBytes) <= ledgerMaxReadBytes`.
+ - `sum(resources.writeBytes) <= ledgerMaxWriteBytes`.
+
+Apply-time enforcement:
+
+- Before executing the transaction logic all the entries in the footprint (both
+ read-only and read-write) are read from the ledger and the total read size is
+ computed by adding the size of the key and size of the entry read (if any) to
+ the total value. If total read size exceeds `resources.readBytes`,
+ transaction fails.
+- During the host function execution any read/write of a ledger key outside of
+ the footprint (or write of a read-only entry) leads immediately to a
+ transaction failure.
+- After the execution the total size of the writes is computed by adding sizes
+ of the keys and values of the non-removed entries. If the total write size
+ exceeds `resources.writeBytes`, transaction fails. Entry deletion is 'free'
+ and not counted towards the total write size.
#### Bandwidth related
-Bandwidth utilization is bounded by the total size of the transactions flooded and included to the ledger.
+Bandwidth utilization is bounded by the total size of the transactions flooded
+and included in the ledger.
All the configuration values come from `ConfigSettingContractBandwidthV0`.
A transaction contains:
-* implicitly, its impact in terms of bandwidth utilization, the size (in bytes) of the `TransactionEnvelope`
-Fee: `NetworkData_fee(txEnvelope) = round_up(size(txEnvelope) * feeTxSize1KB / 1024)`
+- implicitly, its impact in terms of bandwidth utilization, the size (in bytes)
+ of the `TransactionEnvelope`
+
+Fee:
+`NetworkData_fee(txEnvelope) = round_up(size(txEnvelope) * feeTxSize1KB / 1024)`
Validity constraints:
-* per transaction
- * `size(txEnvelope) <= txMaxSizeBytes`
-* ledger wide
- * sum of all `size(txEnvelope)` <= `ledgerMaxTxsSizeBytes`.
+
+- per transaction
+ - `size(txEnvelope) <= txMaxSizeBytes`
+- ledger wide
+ - sum of all `size(txEnvelope)` <= `ledgerMaxTxsSizeBytes`.
Apply-time enforcement: _None_
#### Historical storage
-Historical storage is utilized for any transaction result and hence the fee has to be paid unconditionally. The fee depends on `TransactionEnvelope` size.
+Historical storage is utilized for any transaction result and hence the fee has
+to be paid unconditionally. The fee depends on `TransactionEnvelope` size.
All the configuration values come from `ConfigSettingContractHistoricalDataV0`.
-Fee: `Historical_flat_fee(txEnvelope) = round_up((size(txEnvelope)+TX_BASE_RESULT_SIZE) * feeHistorical1KB / 1024)`
+Fee:
+`Historical_flat_fee(txEnvelope) = round_up((size(txEnvelope)+TX_BASE_RESULT_SIZE) * feeHistorical1KB / 1024)`
-Where `TX_BASE_RESULT_SIZE` is a constant approximating the size in bytes of transaction results published to archives and is set to `300`.
+Where `TX_BASE_RESULT_SIZE` is a constant approximating the size in bytes of
+transaction results published to archives and is set to `300`.
Validity constraints: _None_
@@ -361,31 +463,47 @@ Apply-time enforcement: _None_
#### Contract events and return value
-Contract events are a 'side' output of the transaction that is written to metadata and not to ledger. Invocation return value has the same properties and thus is included into this as well.
+Contract events are a 'side' output of the transaction that is written to
+metadata and not to ledger. Invocation return value has the same properties and
+thus is included into this as well.
-Note, that ledger changes are also emitted in metadata for transaction, but their size is bounded by proxy with ledger access limits and we can consider write fees to also cover metadata writes as well.
+Note, that ledger changes are also emitted in metadata for transaction, but
+their size is bounded by proxy with ledger access limits and we can consider
+write fees to also cover metadata writes as well.
All the configuration values come from `ConfigSettingContractEventsV0`.
-Fee: `Events_fee(eventsBytes) = round_up(eventsBytes * feeContractEvents1KB / 1024)`
+Fee:
+`Events_fee(eventsBytes) = round_up(eventsBytes * feeContractEvents1KB / 1024)`
Validity constraints: _None_
Apply-time enforcement:
-* compute the consumed events size as the sum of events emitted during the host function invocation and its return value. If total size exceeds `ConfigSettingContractEventsV0.txMaxContractEventsSizeBytes`, the transaction fails
+
+- compute the consumed events size as the sum of events emitted during the host
+ function invocation and its return value. If total size exceeds
+ `ConfigSettingContractEventsV0.txMaxContractEventsSizeBytes`, the transaction
+ fails
#### Rent fee
-Rent fee has to be paid if operation increases the lifetime of the ledger entries and/or increases entry size.
+Rent fee has to be paid if operation increases the lifetime of the ledger
+entries and/or increases entry size.
-Rent fee is computed only at transaction application time and it depends on the state of the ledger entries before and after the transaction has been applied.
+Rent fee is computed only at transaction application time and it depends on the
+state of the ledger entries before and after the transaction has been applied.
-Fee: `Rent_fee = sum(rent_fee_per_entry_change(entry_before, entry_after)) + ttl_write_fee` for all the ledger entry changes.
+Fee:
+`Rent_fee = sum(rent_fee_per_entry_change(entry_before, entry_after)) + ttl_write_fee`
+for all the ledger entry changes.
-Entry rent fee consists of two components: fee for renting new ledgers with the new entry size and fee for renting the old ledgers with increased size. If `entry_before` does not exist, we treat its size as `0` and `live_until_ledger` as `0` for the sake of this formula.
+Entry rent fee consists of two components: fee for renting new ledgers with the
+new entry size and fee for renting the old ledgers with increased size. If
+`entry_before` does not exist, we treat its size as `0` and `live_until_ledger`
+as `0` for the sake of this formula.
```
-rent_fee_per_entry_change(entry_before_entry_after) =
+rent_fee_per_entry_change(entry_before_entry_after) =
if (entry_after.live_until_ledger > entry_before.live_until_ledger,
rent_fee_for_size_and_ledgers(
entry_after.is_persistent,
@@ -400,7 +518,8 @@ rent_fee_per_entry_change(entry_before_entry_after) =
0)
```
-`rent_fee_for_size_and_ledgers` is the main rent primitive that computes the fee for renting `S` bytes of ledger space for the period of `L` ledgers:
+`rent_fee_for_size_and_ledgers` is the main rent primitive that computes the
+fee for renting `S` bytes of ledger space for the period of `L` ledgers:
```
rent_fee_for_size_and_ledgers(is_persistent, S, L) = round_up(
@@ -412,15 +531,18 @@ rent_fee_for_size_and_ledgers(is_persistent, S, L) = round_up(
Settings values come from `StateArchivalSettings`.
-Additionally, we charge for the `TTLEntry` writes of entries that had `liveUntilLedgerSeq` changed using the same rate as for any other entry write:
+Additionally, we charge for the `TTLEntry` writes of entries that had
+`liveUntilLedgerSeq` changed using the same rate as for any other entry write:
```
-ttl_write_fee =
+ttl_write_fee =
num_ttl_updates * feeWriteLedgerEntry +
round_up(write_fee_per_1kb(BucketListSize) * TTL_ENTRY_SIZE / 1024)
```
-where `num_ttl_updates` is the number of ledger entries that had `live_until_ledger` updated and `TTL_ENTRY_SIZE` is size of `TTLEntry` with its key and is set to `68` bytes.
+where `num_ttl_updates` is the number of ledger entries that had
+`live_until_ledger` updated and `TTL_ENTRY_SIZE` is size of `TTLEntry` with its
+key and is set to `68` bytes.
Validity constraints: _None_
@@ -428,138 +550,246 @@ Apply-time enforcement: _None_
#### Operations
-Every Soroban transaction must contain exactly 1 operation. There is no fee for operations, but there is a ledger-wide limit on transactions (and thus operations) defined by `ConfigSettingContractExecutionLanesV0.ledgerMaxTxCount`.
+Every Soroban transaction must contain exactly 1 operation. There is no fee for
+operations, but there is a ledger-wide limit on transactions (and thus
+operations) defined by
+`ConfigSettingContractExecutionLanesV0.ledgerMaxTxCount`.
## 'Fee bump' semantics
-Soroban transactions are compatible with the 'fee bump' mechanism via `FeeBumpTransactionEnvelope`. Total transaction fee can be increased in this way in order to account for the higher network contention. However, fee bump transactions can only modify the overall fee of transaction and their semantics is independent of the inner ('bumped') transaction. This leads to the following of the Soroban 'fee bumps':
-
-* `sorobanData.resourceFee` can not be increased via `FeeBumpTransactionEnvelope`, so only the inclusion fee can be raised
-* `sorobanData.resources` can not be modified either, which is why the fee bump envelope is transparent for the resource accounting, i.e. it is not accounted for when computing the transaction size for the sake of enforcing limits/charging the fees
-* The point former also applies to the `TransactionSet` validation: `ledgerMaxTxsSizeBytes` limit enforcement only includes sizes of the inner envelopes of the fee bump transactions
-
-The relation between the resouce and inclusion fees for Soroban 'fee bumps' is defined in the same fashion as for regular Soroban transactions:
+Soroban transactions are compatible with the 'fee bump' mechanism via
+`FeeBumpTransactionEnvelope`. Total transaction fee can be increased in this
+way in order to account for the higher network contention. However, fee bump
+transactions can only modify the overall fee of transaction and their semantics
+is independent of the inner ('bumped') transaction. This leads to the following
+properties of the Soroban 'fee bumps':
+
+- `sorobanData.resourceFee` can not be increased via
+ `FeeBumpTransactionEnvelope`, so only the inclusion fee can be raised
+- `sorobanData.resources` can not be modified either, which is why the fee bump
+ envelope is transparent for the resource accounting, i.e. it is not accounted
+ for when computing the transaction size for the sake of enforcing
+ limits/charging the fees
+- The former point also applies to the `TransactionSet` validation:
+ `ledgerMaxTxsSizeBytes` limit enforcement only includes sizes of the inner
+ envelopes of the fee bump transactions
+
+The relation between the resource and inclusion fees for Soroban 'fee bumps' is
+defined in the same fashion as for regular Soroban transactions:
`feeBumpTx.fee = feeBumpTx.innerTx.sorobanData.resourceFee + fullInclusionFee`
-Protocol treats 'fee bump' as an additional operation. Thus the effective inclusion fee bid used for transaction prioritization is defined as follows:
+Protocol treats 'fee bump' as an additional operation. Thus the effective
+inclusion fee bid used for transaction prioritization is defined as follows:
`inclusionFeeBid = fullInclusionFee / 2 = (feeBumpTx.fee - feeBumpTx.innerTx.sorobanData.resourceFee) / 2`
-Soroban transactions might fail at apply time due to too low declared resource values or too low refundable fee. We don't provide any built-in way for re-using the failed transactions in the first version of Soroban. However, the user experience can be significantly improved by decoupling the transaction signature from the signatures used for the host function invocation itself, specifically by using the Soroban Authorization Framework ([CAP-0046-11](./cap-0046-11.md)). If all the signatures are decoupled, then any party can pay the transaction fees and sign new transactions in case of failure and there is no need to use `FeeBumpTransactionEnvelope` at all (which is cheaper). Soroban nonces will only be consumed on transaction success, so the signatures can be re-used as many times as needed until the transaction succeeds.
+Soroban transactions might fail at apply time due to too low declared resource
+values or too low refundable fee. We don't provide any built-in way for
+re-using the failed transactions in the first version of Soroban. However, the
+user experience can be significantly improved by decoupling the transaction
+signature from the signatures used for the host function invocation itself,
+specifically by using the Soroban Authorization Framework
+([CAP-0046-11](./cap-0046-11.md)). If all the signatures are decoupled, then
+any party can pay the transaction fees and sign new transactions in case of
+failure and there is no need to use `FeeBumpTransactionEnvelope` at all (which
+is cheaper). Soroban nonces will only be consumed on transaction success, so
+the signatures can be re-used as many times as needed until the transaction
+succeeds.
### Future work
-Initial implementation of 'fee bumps' follows the 'classic' rules, which simplifies the protocol design, but comes with a number of shortcomings:
+Initial implementation of 'fee bumps' follows the 'classic' rules, which
+simplifies the protocol design, but comes with a number of shortcomings:
-* It's not possible to increase the resource fee
-* It's not possible to increase the declared resources
-* The inclusion fee has to be 2x of the inclusion fee for the regular transactions
+- It's not possible to increase the resource fee
+- It's not possible to increase the declared resources
+- The inclusion fee has to be 2x of the inclusion fee for the regular
+ transactions
-Future protocol versions may fix these shortcomings by introducing the new type of the 'fee bump' transaction envelope that addresses these shortcomings. The envelope will need to have the `SorobanData` extension that overrides the `SorobanData` of the inner transaction, so that every relevant value can be increased. The new envelope may also have a different inclusion fee semantics that wouldn't count the 'fee bump' as an additonal operation.
+Future protocol versions may fix these shortcomings by introducing the new type
+of the 'fee bump' transaction envelope that addresses these shortcomings. The
+envelope will need to have the `SorobanData` extension that overrides the
+`SorobanData` of the inner transaction, so that every relevant value can be
+increased. The new envelope may also have a different inclusion fee semantics
+that wouldn't count the 'fee bump' as an additional operation.
## Design Rationale
### Fee estimation
-This proposal relies heavily on the existence of a "preflight" mechanism to determine all parameters needed to compute fees.
+This proposal relies heavily on the existence of a "preflight" mechanism to
+determine all parameters needed to compute fees.
-Additional logic (not covered in this CAP), will be needed to determine the market rate of resources based for example on historical data (see below).
+Additional logic (not covered in this CAP) will be needed to determine the
+market rate of resources based for example on historical data (see below).
### Resources
Fees are used to ensure fair and balanced utilization of resources.
For each resource type, we're assuming a model where we can define:
-* the maximum resource consumption for a transaction, as to protect the network.
-* a reasonable price for any given transaction, as to ensure that there are no broken markets
-* additional constraints may include
- * a "ledger wide" maximum as to protect the network and downstream systems when producing blocks.
- * "execution lane" maximum, as to ensure that execution lanes (executed in parallel), are balanced. This CAP does not attempt to define actual semantics or fee models related to parallel execution, and is mentioned here for context.
-
-We’re also assuming that resource allocation is done independently of “classic” transactions (ie: the amount of resources allocated to smart contract execution is independent of other traffic). This points to “smart contract transactions” being managed as their own “phase” (in `GeneralizedTransactionSet` terminology) and having its own dedicated capacity expressed in terms of transactions (`ledgerMaxTxCount`).
-Reasonable fees should be more than some minimum (on top of "on chain market dynamics") both to combat "spam" transactions and ensure that there is no strange incentive to perform certain operations on chain instead of performing them on other systems with worse properties (like centralized cloud infrastructure).
-
-Validators are expected to vote regularly (once a quarter for example) to ensure that fees are set correctly for the broader ecosystem. The exact way fee parameters are established is outside the scope of this document.
+- the maximum resource consumption for a transaction, as to protect the
+ network.
+- a reasonable price for any given transaction, as to ensure that there are no
+ broken markets
+- additional constraints may include
+ - a "ledger wide" maximum as to protect the network and downstream systems
+ when producing blocks.
+ - "execution lane" maximum, as to ensure that execution lanes (executed in
+ parallel), are balanced. This CAP does not attempt to define actual
+ semantics or fee models related to parallel execution, and is mentioned
+ here for context.
+
+We’re also assuming that resource allocation is done independently of “classic”
+transactions (ie: the amount of resources allocated to smart contract execution
+is independent of other traffic). This points to “smart contract transactions”
+being managed as their own “phase” (in `GeneralizedTransactionSet` terminology)
+and having its own dedicated capacity expressed in terms of transactions
+(`ledgerMaxTxCount`).
+
+Reasonable fees should be more than some minimum (on top of "on chain market
+dynamics") both to combat "spam" transactions and ensure that there is no
+strange incentive to perform certain operations on chain instead of performing
+them on other systems with worse properties (like centralized cloud
+infrastructure).
+
+Validators are expected to vote regularly (once a quarter for example) to
+ensure that fees are set correctly for the broader ecosystem. The exact way fee
+parameters are established is outside the scope of this document.
#### Compute
-[CAP-0046: WebAssembly Smart Contract Runtime Environment](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) introduces the notion of virtual instructions. In the context of this CAP, the only thing that matters is that an "instruction" represents an arbitrary base unit for "execution time".
+[CAP-0046: WebAssembly Smart Contract Runtime Environment](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md)
+introduces the notion of virtual instructions. In the context of this CAP, the
+only thing that matters is that an "instruction" represents an arbitrary base
+unit for "execution time".
-As a consequence, the "goal" for validators is to construct a `GeneralizedTransactionSet` that uses up to `lcl.ConfigSettingContractComputeV0.ledgerMaxInstructions`.
+As a consequence, the "goal" for validators is to construct a
+`GeneralizedTransactionSet` that uses up to
+`lcl.ConfigSettingContractComputeV0.ledgerMaxInstructions`.
#### Ledger data
##### Read traffic
-Reads are logically performed *before* transaction execution.
+Reads are logically performed _before_ transaction execution.
When performing reads of a ledger entry:
-* The ledger entry needs to be located via some index in the ledger and the entry loaded. Depending on the underlying database technology, this translates to at least 1 disk operation.
-* The bucket entry needs to be xdr decoded.
+
+- The ledger entry needs to be located via some index in the ledger and the
+ entry loaded. Depending on the underlying database technology, this
+ translates to at least 1 disk operation.
+- The bucket entry needs to be xdr decoded.
The resources to allocate in this context are therefore:
-* a maximum number of ledger entry read operations in a ledger `ledgerMaxReadLedgerEntries`.
-* a maximum number of bytes that can be read in a ledger `ledgerMaxReadBytes`.
-The cost of a "ledger entry read" is fairly open ended, and depends on many variables. In this proposal, we give it a "base cost" for simplicity even if it translates to multiple disk operations (which is typically the case when using B-Trees for example, or if the ledger entry is retrieved by lookup over multiple buckets).
+- a maximum number of ledger entry read operations in a ledger
+ `ledgerMaxReadLedgerEntries`.
+- a maximum number of bytes that can be read in a ledger `ledgerMaxReadBytes`.
-That "base cost" is defined by validators as `feeReadLedgerEntry`. This proposal does not let transactions compete directly on the number of ledger entry read operations, therefore the cost of a read operation is `feeReadLedgerEntry` (validators must still construct transaction sets that keep the number of reads below a maximum).
+The cost of a "ledger entry read" is fairly open ended, and depends on many
+variables. In this proposal, we give it a "base cost" for simplicity even if it
+translates to multiple disk operations (which is typically the case when using
+B-Trees for example, or if the ledger entry is retrieved by lookup over
+multiple buckets).
-Transactions contain the total number of bytes that they will read from the bucket list as well at a fee bid for reading those bytes.
+That "base cost" is defined by validators as `feeReadLedgerEntry`. This
+proposal does not let transactions compete directly on the number of ledger
+entry read operations, therefore the cost of a read operation is
+`feeReadLedgerEntry` (validators must still construct transaction sets that
+keep the number of reads below a maximum).
-The number of bytes read corresponds to the size of the latest `BucketEntry` for that ledger entry (and does not take into account the possibility that an implementation may read stale entries in buckets or may have to read other entries from a bucket).
+Transactions contain the total number of bytes that they will read from the
+bucket list as well as a fee bid for reading those bytes.
-The fee is determined based on the rate `feeRead1KB` expressed for reading 1 KB (1024 bytes) worth of data.
+The number of bytes read corresponds to the size of the latest `BucketEntry`
+for that ledger entry (and does not take into account the possibility that an
+implementation may read stale entries in buckets or may have to read other
+entries from a bucket).
-As transactions compete for the total read capacity `ledgerMaxReadBytes` for a given ledger, the inclusion fee goes up.
+The fee is determined based on the rate `feeRead1KB` expressed for reading 1 KB
+(1024 bytes) worth of data.
+As transactions compete for the total read capacity `ledgerMaxReadBytes` for a
+given ledger, the inclusion fee goes up.
##### Write traffic and ledger size
-Writes are performed *after* transaction execution, and are blocking the actual closing of a ledger.
+Writes are performed _after_ transaction execution, and are blocking the actual
+closing of a ledger.
When writing a ledger entry:
-* The bucket entry is marshaled to binary.
-* The bucket entry is appended to the topmost bucket serially.
-* The bucket entry is read, hashed and written back with every level merge operation.
-In this proposal, we're modeling "worst case": a bucket entry gets added to the bucket list and has to travel all the way to the bottom bucket, contributing as many bytes as the bucket entry itself.
+- The bucket entry is marshaled to binary.
+- The bucket entry is appended to the topmost bucket serially.
+- The bucket entry is read, hashed and written back with every level merge
+ operation.
+
+In this proposal, we're modeling "worst case": a bucket entry gets added to the
+bucket list and has to travel all the way to the bottom bucket, contributing as
+many bytes as the bucket entry itself.
-In that case, the overhead is dominated by the size of buckets and bucket entries, and the number of bucket entries is not really a factor when merging.
+In that case, the overhead is dominated by the size of buckets and bucket
+entries, and the number of bucket entries is not really a factor when merging.
-Consequently, we can model the cost of a write as an append to the overall bucket list and charge a "base rate" for adding a bucket entry.
+Consequently, we can model the cost of a write as an append to the overall
+bucket list and charge a "base rate" for adding a bucket entry.
-For allocating ledger entry writes, the model is analogous to "reads": a ledger is constructed as to not exceed `ledgerMaxWriteLedgerEntry` writes and each write contributes `feeWriteLedgerEntry` to the overall fee for that transaction (no market dynamics here).
+For allocating ledger entry writes, the model is analogous to "reads": a ledger
+is constructed as to not exceed `ledgerMaxWriteLedgerEntry` writes and each
+write contributes `feeWriteLedgerEntry` to the overall fee for that transaction
+(no market dynamics here).
As for "bytes written", the model that was chosen is:
-* use the total bucket list size as the main resource to track.
-* a cost function, allows to price the cost of expanding ledger size.
-* ledger size, and therefore price of storage, goes down as bucket entries get merged/deleted.
-The cost function that was selected is similar to what was proposed in Ethereum's [make EIP 1559 more like an AMM curve](https://ethresear.ch/t/make-eip-1559-more-like-an-amm-curve/9082).
+- use the total bucket list size as the main resource to track.
+- a cost function, allows to price the cost of expanding ledger size.
+- ledger size, and therefore price of storage, goes down as bucket entries get
+ merged/deleted.
-The main point being that the fee for adding `b` bytes to a bucket list of size `s` is calculated as `fee(b,s) = lfee(s + b) - lfee(s)`, where `lfee` is the "total cost to build a bucket list of a given size".
-When designing for specific properties of that function, it's useful to see the "fee rate": `fee_rate(s) = lim b->0, fee(b, s)/ b = (lfee(s+b) - lfee(b))/b`, is the derivative of `lfee`, ie `fee_rate(s) = lfee'(s)`.
+The cost function that was selected is similar to what was proposed in
+Ethereum's
+[make EIP 1559 more like an AMM curve](https://ethresear.ch/t/make-eip-1559-more-like-an-amm-curve/9082).
+
+The main point being that the fee for adding `b` bytes to a bucket list of size
+`s` is calculated as `fee(b,s) = lfee(s + b) - lfee(s)`, where `lfee` is the
+"total cost to build a bucket list of a given size". When designing for
+specific properties of that function, it's useful to see the "fee rate":
+`fee_rate(s) = lim b->0, fee(b, s)/ b = (lfee(s+b) - lfee(b))/b`, is the
+derivative of `lfee`, ie `fee_rate(s) = lfee'(s)`.
Properties that we're looking for:
-* validators should be able to pick parameters such that total bucket list size can grow to size `M_base` (that is deemed manageable by the ecosystem), but puts up a lot of resistance to grow to size `M_base+M_buffer` and beyond.
-* `fee_rate(s)` should provide enough feedback for users and use cases to self-correct. It would not be desirable at the extreme to have very low fees up to `M_base` and suddenly "hit a wall" where fees shoot up to extremely high numbers after that.
-Given those, the choice for `fee_rate` is constructed as the superposition of the following 2 functions (integrating yields the respective `lfee` component):
-* `(feeRateM - feeRate)*s/M_base + feeRate` --> `(feeRateM - feeRate)*s^2/(2*M_base) + feeRate*s`
-* `if s > M_base, exp(K*(s-M_base)/B_buffer)` --> `exp(K*(s-M_base)/B_buffer)*B_buffer/K`
+- validators should be able to pick parameters such that total bucket list size
+ can grow to size `M_base` (that is deemed manageable by the ecosystem), but
+ puts up a lot of resistance to grow to size `M_base+M_buffer` and beyond.
+- `fee_rate(s)` should provide enough feedback for users and use cases to
+ self-correct. It would not be desirable at the extreme to have very low fees
+ up to `M_base` and suddenly "hit a wall" where fees shoot up to extremely
+ high numbers after that.
+
+Given those, the choice for `fee_rate` is constructed as the superposition of
+the following 2 functions (integrating yields the respective `lfee` component):
+
+- `(feeRateM - feeRate)*s/M_base + feeRate` -->
+ `(feeRateM - feeRate)*s^2/(2*M_base) + feeRate*s`
+- `if s > M_base, exp(K*(s-M_base)/B_buffer)` -->
+ `exp(K*(s-M_base)/B_buffer)*B_buffer/K`
-Where `feeRate` and `feeRateM` are the fee rate at size 0 and `M_base` respectively.
+Where `feeRate` and `feeRateM` are the fee rate at size 0 and `M_base`
+respectively.
Which together yields:
`lfee(s) = (feeRateM - feeRate)*s^2/(2*M_base) + feeRate*s + (if s > M_base, exp(K*(s-M_base)/B_buffer), 0)`.
-With `K` picked such that `fee(1, M_base+M_buffer)` is orders of magnitude larger than what the market would be willing to pay.
+With `K` picked such that `fee(1, M_base+M_buffer)` is orders of magnitude
+larger than what the market would be willing to pay.
-We simplify those functions further by charging fees linearly to the number of bytes within a ledger (see rationale below).
+We simplify those functions further by charging fees linearly to the number of
+bytes within a ledger (see rationale below).
As a consequence the final formula looks like this:
@@ -568,7 +798,9 @@ As a consequence the final formula looks like this:
With
`fee_rate(s) = (feeRateM - feeRate)*s/M_base + feeRate + if (s > M_base, exp(K*(s-M_base)/B_buffer), 0)`
-We can simplify this even further by replacing the exponential component by a steep linear slope that causes fees to be "extremely high" at `M_buffer`, which turns the formula into what is specified above:
+We can simplify this even further by replacing the exponential component by a
+steep linear slope that causes fees to be "extremely high" at `M_buffer`, which
+turns the formula into what is specified above:
`fee_rate(s) = (feeRateM - feeRate)*s/M_base + feeRate + if (s > M_base, K*(s-M_base)/B_buffer, 0)`
@@ -576,143 +808,252 @@ where `K >= 1`.
##### Ledger size averaging
-Tracking the ledger size for every ledger introduces unnecessary noise that leads to the following issues:
-* flooding might be somewhat imprecise due to fees changing every ledger with a risk of transaction becoming invalid
-* wrong incentives, such as trying to pay the rent for a long time period right after the bucket list merge ledger
-* fee estimations are harder for the clients
+Tracking the ledger size for every ledger introduces unnecessary noise that
+leads to the following issues:
-To alleviate all of these issues, instead of using the current ledger size, this proposal uses the average of the ledger size over the sliding window, that is large enough to average out most of the noise coming from short-term merges and rather representing the ledger size change trends rather than actual size at any moment.
+- flooding might be somewhat imprecise due to fees changing every ledger with a
+ risk of transaction becoming invalid
+- wrong incentives, such as trying to pay the rent for a long time period right
+ after the bucket list merge ledger
+- fee estimations are harder for the clients
+
+To alleviate all of these issues, instead of using the current ledger size,
+this proposal uses the average of the ledger size over the sliding window, that
+is large enough to average out most of the noise coming from short-term merges
+and rather representing the ledger size change trends rather than actual size
+at any moment.
##### Putting it together
-"read/write" operations need to first read data before writing it. The amount of data written back can be larger or smaller than what was read, as consequence:
-* The number of ledger entry reads is the size of ledger entries referenced in ledger footprints (both read and read/write).
-* The number of bytes to read is the size of bucket entries from both the read and read/write footprints.
-* The number of bytes to write is the number of bytes associated with bucket entries referenced by the readWrite footprint.
-* The number of ledger entries to write is the size of the read/write footprint.
+"read/write" operations need to first read data before writing it. The amount
+of data written back can be larger or smaller than what was read, as
+consequence:
+
+- The number of ledger entry reads is the size of ledger entries referenced in
+ ledger footprints (both read and read/write).
+- The number of bytes to read is the size of bucket entries from both the read
+ and read/write footprints.
+- The number of bytes to write is the number of bytes associated with bucket
+ entries referenced by the readWrite footprint.
+- The number of ledger entries to write is the size of the read/write
+ footprint.
##### Ledger size reduction
-So far we've established a model for deriving fees based on the bucket list size, but there needs to be a mechanism to ensure that the cost of storage does not grow indefinitely, hurting usability of the network.
+So far we've established a model for deriving fees based on the bucket list
+size, but there needs to be a mechanism to ensure that the cost of storage does
+not grow indefinitely, hurting usability of the network.
Core ideas and principles:
-* Ledger space is a shared public resource, policies should be set to ensure fair use.
-* cost of using ledger space should converge towards market rate over time
- * in particular creating spam ledger entries should cost market rate over the long term.
-* abandoned entries should not cost anything to network participants over the long term.
-This proposal therefore depends on a solution with the following high level properties:
-* ledger entries have to periodically pay for "rent", where the rent amount is adjusted on a per period basis (as to approximate "market rate")
-* ledger entries that do not want to pay for rent anymore should be purged from the ledger, freeing up space for other entries (and lowering the overall price of storage)
- * purged entries may be recoverable by relying on external recovery nodes that can reconstruct proofs that validators can verify.
+- Ledger space is a shared public resource, policies should be set to ensure
+ fair use.
+- cost of using ledger space should converge towards market rate over time
+ - in particular creating spam ledger entries should cost market rate over the
+ long term.
+- abandoned entries should not cost anything to network participants over the
+ long term.
+
+This proposal therefore depends on a solution with the following high level
+properties:
+
+- ledger entries have to periodically pay for "rent", where the rent amount is
+ adjusted on a per period basis (as to approximate "market rate")
+- ledger entries that do not want to pay for rent anymore should be purged from
+ the ledger, freeing up space for other entries (and lowering the overall
+ price of storage)
+ - purged entries may be recoverable by relying on external recovery nodes
+ that can reconstruct proofs that validators can verify.
#### Historical storage
-Historical storage corresponds to data that needs to be persisted by full validators outside of the bucket list.
+Historical storage corresponds to data that needs to be persisted by full
+validators outside of the bucket list.
This includes transactions and their result.
-As the data is stored only once but for "eternity", it has to be priced accordingly (at a minimum, this data has to be made available as to allow validators to catch up to the network).
+As the data is stored only once but for "eternity", it has to be priced
+accordingly (at a minimum, this data has to be made available as to allow
+validators to catch up to the network).
-The model retained in the context of this CAP is to just have the validators set a flat rate per byte for this kind of data (updated on a regular basis as to track cost of storage over time).
+The model retained in the context of this CAP is to just have the validators
+set a flat rate per byte for this kind of data (updated on a regular basis as
+to track cost of storage over time).
##### Transaction Result
-In order to reduce the base cost of transactions, the "result" published to archive is fixed size and the actual detailed transaction result is emitted in the meta and accounted for in the same way as contract events. See [CAP-0046: Smart Contract Events](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-08.md) for more details.
+In order to reduce the base cost of transactions, the "result" published to
+archive is fixed size and the actual detailed transaction result is emitted in
+the meta and accounted for in the same way as contract events. See
+[CAP-0046: Smart Contract Events](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-08.md)
+for more details.
#### Extended meta data
-Extended meta data here refers to parts of the meta data (produced when closing ledgers) that are not related to ledger changes:
-* Smart contracts generate "events"
-* `TransactionResult`
+Extended meta data here refers to parts of the meta data (produced when closing
+ledgers) that are not related to ledger changes:
-See [CAP-0046: Smart Contract Events](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-08.md) for more details.
+- Smart contracts generate "events"
+- `TransactionResult`
+
+See
+[CAP-0046: Smart Contract Events](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-08.md)
+for more details.
Fees are needed to control for the overhead in those systems.
-The model retained in this CAP is a flat rate per byte model for simplicity. It is expected that this fee would be orders of magnitude smaller than what is needed to persist data on chain.
+The model retained in this CAP is a flat rate per byte model for simplicity. It
+is expected that this fee would be orders of magnitude smaller than what is
+needed to persist data on chain.
#### Bandwidth
Transactions need to be propagated to peers on the network.
-At the networking layer, transactions compete for bandwidth on a per ledger basis (`ledgerMaxPropagateSizeBytes`).
+At the networking layer, transactions compete for bandwidth on a per ledger
+basis (`ledgerMaxPropagateSizeBytes`).
-Note that validators may apply additional market dynamics due to implementation constraints, especially when trying to balance propagating large transactions vs smaller ones. See [CAP-0042: Multi-Part Transaction Sets](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md).
+Note that validators may apply additional market dynamics due to implementation
+constraints, especially when trying to balance propagating large transactions
+vs smaller ones. See
+[CAP-0042: Multi-Part Transaction Sets](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md).
##### Ephemeral payload
-In the future, it may be possible to attach a `ephemeralPayload` (Hash + size), that gets cleared before applying transactions (used in the context of proofs of availability).
+In the future, it may be possible to attach an `ephemeralPayload` (Hash + size),
+that gets cleared before applying transactions (used in the context of proofs
+of availability).
-Further reading: [blob transactions in Ethereum](https://notes.ethereum.org/@vbuterin/blob_transactions).
+Further reading:
+[blob transactions in Ethereum](https://notes.ethereum.org/@vbuterin/blob_transactions).
#### Refunds on “flat rate” resources
-Some resources are priced determined on a per ledger basis, independently of transaction set composition.
+Some resources have prices determined on a per ledger basis, independently of
+transaction set composition.
-For such resources, a transaction gets charged the “worst case” utilization at the beginning of the transaction execution, and gets refunded based on actual usage at the end of the execution.
+For such resources, a transaction gets charged the “worst case” utilization at
+the beginning of the transaction execution, and gets refunded based on actual
+usage at the end of the execution.
#### No refund for unused capacity on market based resources
-If a transaction declares that it wants to use up to X units of a given resource, nominators assemble a transaction set with that information, potentially excluding other transactions because of this.
+If a transaction declares that it wants to use up to X units of a given
+resource, nominators assemble a transaction set with that information,
+potentially excluding other transactions because of this.
-As a consequence, there should not be any refund for unused capacity. Specifically, if a resource was priced at a given rate by validators, the fee charged will be for the entire capacity (note that this still lets validators provide discounts on the rate).
+As a consequence, there should not be any refund for unused capacity.
+Specifically, if a resource was priced at a given rate by validators, the fee
+charged will be for the entire capacity (note that this still lets validators
+provide discounts on the rate).
#### Transaction fees and prioritization
-This proposal assumes that fees charged for resources based on network settings are "fair", and that market dynamics should be shifted towards the "intent" of any given transaction (also called "social value" of a transaction).
+This proposal assumes that fees charged for resources based on network settings
+are "fair", and that market dynamics should be shifted towards the "intent" of
+any given transaction (also called "social value" of a transaction).
This implies that:
-* transactions are flooded/included purely based on their social value.
-* additional throttling at the overlay layer may occur when some resources are scarce (similar to how in classic, the rate of operations that can be flooded is capped).
-Note that the inclusion fee is *not* related to the amount of work that a transaction does. In other words, a transaction performing twice as much work than another but with the same inclusion fee bid are considered to have the same priority.
+- transactions are flooded/included purely based on their social value.
+- additional throttling at the overlay layer may occur when some resources are
+ scarce (similar to how in classic, the rate of operations that can be flooded
+ is capped).
+
+Note that the inclusion fee is _not_ related to the amount of work that a
+transaction does. In other words, a transaction performing twice as much work
+as another but with the same inclusion fee bid is considered to have the
+same priority.
-This simplification allows to remove entirely the need to model on chain a "synthetic universal resource" that can be used to represent the amount of work a given transaction performs (such as "gas" in Ethereum for example).
+This simplification allows us to entirely remove the need to model on chain a
+"synthetic universal resource" that can be used to represent the amount of work
+a given transaction performs (such as "gas" in Ethereum for example).
The following notable properties are expected with this model:
-* adjustment to fee rates can be done using arbitrary models based on historical data, outside of the network
-* in the future, additional logic can be added to have some price adjustment based on historical usage (similar to what is done for ledger space)
-* validators (via CAP-0042 components) can still group similar transactions together.
+
+- adjustment to fee rates can be done using arbitrary models based on
+ historical data, outside of the network
+- in the future, additional logic can be added to have some price adjustment
+ based on historical usage (similar to what is done for ledger space)
+- validators (via CAP-0042 components) can still group similar transactions
+ together.
#### Alternate fee model considered: multidimensional and uniform fees
-Another way considered at some point was to try to dynamically price resources as to attain some sort of market rate as quickly as possible. This section goes over the approaches to implement "resource markets".
+Another way considered at some point was to try to dynamically price resources
+as to attain some sort of market rate as quickly as possible. This section goes
+over the approaches to implement "resource markets".
-Note that we’re excluding “flat rate” resources where there is no competition from this section.
+Note that we’re excluding “flat rate” resources where there is no competition
+from this section.
There are two ways to do it:
-* have a separate market for each dimension. Transactions need to explicitly bid on each dimension.
- * This allows accurate price discovery for all resources. For example, if there is a lot of contention on "Instructions", this allows to discover the price of an instruction.
- * Relative priority between transactions is flexible, this is good (more room for innovation by nominators) and bad (harder for clients to know what to do to “get ahead”).
-* transactions just specify a "fee bid" for multiple dimensions at once (potentially all markets at once)
- * there needs to be a function that computes the "minimum fee" for a given transaction, mixing all dimensions somehow (polynomial of sorts for example). Effectively creating a "synthetic universal resource".
- * comparing transactions can be done by comparing the ratio between the fee bid and the minimum fee, which is simple.
- * There is no price discovery of individual dimensions as people automatically bid more on all dimensions at once. That said, nominators can just pick "market prices" for each dimension that fits recent network conditions.
-
-Both solutions require nominators to price resources (in much the same way that CAP-0042 allows nominators to price operations in the classic protocol).
-
-The bidding is more complicated with the first approach. In order to come up with a reasonable bid, clients need not only to have 'market prices' for every resource, but also need to take into account the comparison algorithm used during transaction set building. For example, validators may consider ordering transactions by a tuple of bid-to-min-fee ratios for every resource (e.g. (instructions, IO, bandwidth)) and in order to prevent abuse of the fixed order, they would dynamically come up with that order depending on the current contents of the transaction queue. It's not obvious how to bid optimally for such an algorithm, as every ledger priorities might change several times.
-For the second approach the bidding is comparable with the classic transactions: there is just a single 'market rate' for the smart contract transactions, that can be both used as a part of the bidding strategy and for comparison. The downside is that it requires maintaining parameters used to give different weights to the various resources as to come up with a "synthetic universal resource" that the network can reason about.
+- have a separate market for each dimension. Transactions need to explicitly
+ bid on each dimension.
+ - This allows accurate price discovery for all resources. For example, if
+ there is a lot of contention on "Instructions", this allows to discover the
+ price of an instruction.
+ - Relative priority between transactions is flexible, this is good (more room
+ for innovation by nominators) and bad (harder for clients to know what to
+ do to “get ahead”).
+- transactions just specify a "fee bid" for multiple dimensions at once
+ (potentially all markets at once)
+ - there needs to be a function that computes the "minimum fee" for a given
+ transaction, mixing all dimensions somehow (polynomial of sorts for
+ example). Effectively creating a "synthetic universal resource".
+ - comparing transactions can be done by comparing the ratio between the fee
+ bid and the minimum fee, which is simple.
+ - There is no price discovery of individual dimensions as people
+ automatically bid more on all dimensions at once. That said, nominators can
+ just pick "market prices" for each dimension that fits recent network
+ conditions.
+
+Both solutions require nominators to price resources (in much the same way that
+CAP-0042 allows nominators to price operations in the classic protocol).
+
+The bidding is more complicated with the first approach. In order to come up
+with a reasonable bid, clients need not only to have 'market prices' for every
+resource, but also need to take into account the comparison algorithm used
+during transaction set building. For example, validators may consider ordering
+transactions by a tuple of bid-to-min-fee ratios for every resource (e.g.
+(instructions, IO, bandwidth)) and in order to prevent abuse of the fixed
+order, they would dynamically come up with that order depending on the current
+contents of the transaction queue. It's not obvious how to bid optimally for
+such an algorithm, as priorities might change several times every ledger.
+
+For the second approach the bidding is comparable with the classic
+transactions: there is just a single 'market rate' for the smart contract
+transactions, that can be both used as a part of the bidding strategy and for
+comparison. The downside is that it requires maintaining parameters used to
+give different weights to the various resources as to come up with a "synthetic
+universal resource" that the network can reason about.
Related work:
- * Ethereum [Multidimensional EIP-1559](https://ethresear.ch/t/multidimensional-eip-1559/11651).
+
+- Ethereum
+ [Multidimensional EIP-1559](https://ethresear.ch/t/multidimensional-eip-1559/11651).
## Protocol Upgrade Transition
+
None, this fee model will only apply to smart contract transactions.
-A subsequent CAP may update the fee model for the existing classic transaction subsystem as to be more consistent with this CAP.
+A subsequent CAP may update the fee model for the existing classic transaction
+subsystem as to be more consistent with this CAP.
### Resource Utilization
-There are no significant resource utilization changes compared to the classic fee model.
+There are no significant resource utilization changes compared to the classic
+fee model.
## Security Concerns
-The resource fees and limits are introduced to maintain network health and therefore the all the risks are around the network liveness and DOS possibility, but not necessarily security.
+The resource fees and limits are introduced to maintain network health and
+therefore all the risks are around the network liveness and DoS
+possibility, but not necessarily security.
-Incorrect configuration or incorrect enforcement calibration might lead to high ledger close times or spam.
+Incorrect configuration or incorrect enforcement calibration might lead to high
+ledger close times or spam.
## Test Cases
@@ -720,11 +1061,11 @@ The fees are covered in most of the Soroban-related test cases.
## Implementation
-[TransactionFrame::validateSorobanResources](https://github.com/stellar/stellar-core/blob/0df2e0c6f80d2c461870e837fbe50fa16f9048f3/src/transactions/TransactionFrame.cpp#L588) enforces the limts at transaction validation time.
-
-
-[InvokeHostFunctionOpFrame::doApply](https://github.com/stellar/stellar-core/blob/0df2e0c6f80d2c461870e837fbe50fa16f9048f3/src/transactions/InvokeHostFunctionOpFrame.cpp#L379) performs most of the apply-time resource limit enforcement.
-
-[`fees.rs`][https://github.com/stellar/rs-soroban-env/blob/d92944576e2301c9866215efcdc4bbd24a5f3981/soroban-env-host/src/fees.rs] file of Soroban host contains all the fee computation logic specified here.
+[TransactionFrame::validateSorobanResources](https://github.com/stellar/stellar-core/blob/0df2e0c6f80d2c461870e837fbe50fa16f9048f3/src/transactions/TransactionFrame.cpp#L588)
+enforces the limits at transaction validation time.
+[InvokeHostFunctionOpFrame::doApply](https://github.com/stellar/stellar-core/blob/0df2e0c6f80d2c461870e837fbe50fa16f9048f3/src/transactions/InvokeHostFunctionOpFrame.cpp#L379)
+performs most of the apply-time resource limit enforcement.
+[`fees.rs`](https://github.com/stellar/rs-soroban-env/blob/d92944576e2301c9866215efcdc4bbd24a5f3981/soroban-env-host/src/fees.rs)
+file of Soroban host contains all the fee computation logic specified here.
diff --git a/core/cap-0046-08.md b/core/cap-0046-08.md
index 481b8a8c6..ddbabf65b 100644
--- a/core/cap-0046-08.md
+++ b/core/cap-0046-08.md
@@ -14,26 +14,34 @@ Protocol version: 20
```
## Simple Summary
+
This CAP proposes an update to `TransactionMeta` along with new host functions
to allow for contracts to write data to the meta that can be consumed by
downstream systems.
## Motivation
+
Contract writers will need a way to emit information about what their specific
contracts are doing. Downstream consumers of these new messages can also push
these to subscribers, allowing users to listen to specific messages.
### Goals Alignment
+
This CAP is aligned with the following Stellar Network Goals:
-* The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
+
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
+
This CAP provides host functions for contracts to write events that will be put
-into `TransactionMeta`. It also provides a way to cryptographically verify the events.
+into `TransactionMeta`. It also provides a way to cryptographically verify the
+events.
## Specification
### Contract Events
+
We introduced a `TransactionMetaV3`, which has an optional
`SorobanTransactionMeta` struct that should be set for Soroban transactions.
This contains a `events` vector of `ContractEvent`, which will contain the
@@ -47,20 +55,22 @@ transactions will not catch instances where the events emitted have changed. To
fix this issue, `InvokeHostFunctionOp` returns the SHA-256 hash of
`InvokeHostFunctionSuccessPreImage`, which contains the SCVal return value of
the top level contract call, and the vector of events emitted. This makes the
-events part of the protocol.
-
+events part of the protocol.
### Diagnostic events
-The host can also emit diagnostic events, which are not part of the protocol and
-do not count towards metering and fees. These events are in a separate
+
+The host can also emit diagnostic events, which are not part of the protocol
+and do not count towards metering and fees. These events are in a separate
`diagnosticEvents` vector in `SorobanTransactionMeta` with `ContractEventType`
`DIAGNOSTIC`. To be able to order diagnostic and non-diagnostic events,
`diagnosticEvents` will also contain all of the protocol events in the order
-they were emitted.
+they were emitted.
#### Host function
-The `contract_event` host function specified in [CAP-0046-03](./CAP-0046-03.md#context-host-functions)
-can be used to create events in contracts.
+
+The `contract_event` host function specified in
+[CAP-0046-03](./CAP-0046-03.md#context-host-functions) can be used to create
+events in contracts.
### XDR Changes
@@ -68,42 +78,50 @@ See the XDR diffs in the Soroban overview CAP, specifically those covering
changes to transaction meta and results, result sets, history entries, and
contract events.
-
## Design Rationale
### Different event types
+
Each event host function uses a different type. This gives downstream users
another fields to query off of. In addition to that, events from `system_event`
are also special in that they can only be used by the host functions, allowing
users to see "system" level events as opposed to ones generated by contracts.
-For example, this can be used when contracts are created to emit the hash of the
-created contract.
+For example, this can be used when contracts are created to emit the hash of
+the created contract.
### Limits
+
There are no limits on individual events, but the total size of all events
-emitted in a transaction. The maximum combined events size will be
-introduced as `txMaxContractEventsSizeBytes` in the
-`ConfigSettingContractEventsV0` `ConfigSettingEntry`(see
-[CAP-0046-09](./cap-0046-09.md) for details on config entries).
-`txMaxContractEventsSizeBytes` will be `10000` initially, and the minimum the
-setting can be set to is `0`.
+emitted in a transaction. The maximum combined events size will be introduced
+as `txMaxContractEventsSizeBytes` in the `ConfigSettingContractEventsV0`
+`ConfigSettingEntry` (see [CAP-0046-09](./cap-0046-09.md) for details on config
+entries). `txMaxContractEventsSizeBytes` will be `10000` initially, and the
+minimum the setting can be set to is `0`.
## Protocol Upgrade Transition
### Backwards Incompatibilities
+
Downstream systems will need to be able to parse `TransactionMetaV3` for all
meta information.
### Resource Utilization
-The events can make `TransactionMeta` larger by at most `txMaxContractEventsSizeBytes`.
+
+The events can make `TransactionMeta` larger by at most
+`txMaxContractEventsSizeBytes`.
## Security Concerns
+
The security concerns from [CAP-0051]
(https://github.com/stellar/stellar-protocol/blob/master/core/cap-0051.md#security-concerns)
apply here as well.
## Test Cases
+
TBD
## Implementation
-TBD. See [rs-stellar-contract-env](https://github.com/stellar/rs-stellar-contract-env) and stellar-core’s repo (branch to be added) for the prototype implementation.
+
+TBD. See
+[rs-stellar-contract-env](https://github.com/stellar/rs-stellar-contract-env)
+and stellar-core’s repo (branch to be added) for the prototype implementation.
diff --git a/core/cap-0046-09.md b/core/cap-0046-09.md
index c60eda006..7b4932f59 100644
--- a/core/cap-0046-09.md
+++ b/core/cap-0046-09.md
@@ -29,12 +29,11 @@ network-wide configuration is only stored in the `LedgerHeader`, which would be
infeasible to use given that the Soroban-related information may require tens
of kilobytes of space to be stored.
-
## Abstract
This CAP introduces a special type of configuration `LedgerEntry` that can only
-be modified via a validator vote. It also specifies the general rules for adding
-and modifying these entries.
+be modified via a validator vote. It also specifies the general rules for
+adding and modifying these entries.
## Specification
@@ -56,11 +55,11 @@ the ledger as a new type of `LedgerEntry`: `ConfigSettingEntry`. Every
hence at most one entry of each type may exist in the ledger. Setting entries
are represented by arbitrary XDR.
-`ConfigSettingID` and its corresponding payload should correspond to an 'atomic'
-set of configuration settings, i.e. those that are expected to be used and
-upgraded together. For example, contract size limit, metering data and fee data
-would all be represented by the separate entries (or more granular entries if
-needed).
+`ConfigSettingID` and its corresponding payload should correspond to an
+'atomic' set of configuration settings, i.e. those that are expected to be used
+and upgraded together. For example, contract size limit, metering data and fee
+data would all be represented by the separate entries (or more granular entries
+if needed).
#### Adding and removing setting entries
@@ -80,25 +79,29 @@ they are introduced (similar to e.g. `LedgerHeader` parameters).
### Configuration upgrade mechanism
-Every configuration setting entry is considered to be atomic for the sake of the
-upgrade, i.e. an upgrade must provide the full XDR of the setting, even if it
-is only being partially updated. This is done to simplify the upgrade format
+Every configuration setting entry is considered to be atomic for the sake of
+the upgrade, i.e. an upgrade must provide the full XDR of the setting, even if
+it is only being partially updated. This is done to simplify the upgrade format
(otherwise we'd need to introduce some key-value mechanism of specifying
upgraded fields).
Multiple configuration setting entries may be upgraded at once using a
`ConfigUpgradeSet` struct that contains all the updated `ConfigSettingEntry`
-entries. The `LedgerUpgrade` in `StellarValue` that validators will vote on will
-contain only an `ConfigUpgradeSetKey`, which containts a contractID, and the
-SHA-256 hash of the `ConfigUpgradeSet`. The actual `ConfigUpgradeSet` will need
-to exist on ledger in a `ContractDataEntry`, and the validators will look it up
-using the `ConfigUpgradeSetKey`. This means that a Wasm contract will need to be
-used to write the proposed upgrade. The specs of the entry written is as follows -
-- It should be a `TEMPORARY` entry, so make sure it's bumped to live long enough for the upgrade.
-- The `SCVal` `key` should be of type `SCV_BYTES`, where the value is the SHA-256 hash of the `ConfigUpgradeSet`.
+entries. The `LedgerUpgrade` in `StellarValue` that validators will vote on
+will contain only a `ConfigUpgradeSetKey`, which contains a contractID, and
+the SHA-256 hash of the `ConfigUpgradeSet`. The actual `ConfigUpgradeSet` will
+need to exist on ledger in a `ContractDataEntry`, and the validators will look
+it up using the `ConfigUpgradeSetKey`. This means that a Wasm contract will
+need to be used to write the proposed upgrade. The specs of the entry written
+are as follows:
+
+- It should be a `TEMPORARY` entry, so make sure it's bumped to live long
+ enough for the upgrade.
+- The `SCVal` `key` should be of type `SCV_BYTES`, where the value is the
+ SHA-256 hash of the `ConfigUpgradeSet`.
- The `SCVal` `val` should also be of type `SCV_BYTES`, where the value is the
serialized XDR of the `ConfigUpgradeSet`.
-
+
Once the proposed upgrade is setup, you'll need to share the
`ConfigUpgradeSetKey` with the other validators so they can vote for it. Note
that the validators will verify that the hash in `ConfigUpgradeSetKey` matches
@@ -111,7 +114,7 @@ the upgrade).
`ConfigUpgradeSet` is considered valid when:
-- all the `updatedEntry` values have unique `configSettingID` values
+- all the `updatedEntry` values have unique `configSettingID` values
- `updateEntry` are ordered by the integer values of the `ConfigSettingID` enum
- every `updatedEntry` is valid itself
@@ -128,9 +131,9 @@ contracts, which means the `ConfigUpgradeSet` mechanism cannot be used to
re-enable Soroban.
To allow increasing `ledgerMaxTxCount` from 0, we introduce the ability to set
-the value using the traditional upgrade mechanism. There's a new `LedgerUpgrade`
-type `LEDGER_UPGRADE_MAX_SOROBAN_TX_SET_SIZE`, which contains a `uint32
-newMaxSorobanTxSetSize` to update
+the value using the traditional upgrade mechanism. There's a new
+`LedgerUpgrade` type `LEDGER_UPGRADE_MAX_SOROBAN_TX_SET_SIZE`, which contains a
+`uint32 newMaxSorobanTxSetSize` to update
`ConfigSettingContractExecutionLanesV0::ledgerMaxTxCount`.
#### Minimum values
diff --git a/core/cap-0046-10.md b/core/cap-0046-10.md
index 23ae3f5c8..cfd1885b0 100644
--- a/core/cap-0046-10.md
+++ b/core/cap-0046-10.md
@@ -15,175 +15,382 @@ Protocol version: 20
## Simple Summary
-This CAP defines the resources for running a smart contract, and proposes an componentized, extensible framework of metering those resources during runtime against a predetermined budget.
+This CAP defines the resources for running a smart contract, and proposes a
+componentized, extensible framework of metering those resources during runtime
+against a predetermined budget.
## Working Group
+
TBD
## Motivation
-Resource **metering** serves as the canonical truth of the cost of executing a smart contract on the network. It has two main goals.
+
+Resource **metering** serves as the canonical truth of the cost of executing a
+smart contract on the network. It has two main goals.
+
- Preventing DoS attacks
- Ensuring fair and efficient resource allocation
-The ledger has capacity limits. If transaction processing is allocated 1s (out of the total 5s of ledger closing time), and the max number of smart contract transactions per ledger is `X`, then the max compute time of each transaction is `1/X`, ignoring non-smart transactions and parallel execution. Furthermore, the ledger processing unit has a memory capacity, which the total amount of memory usage (host objects, linear memory) of executing the smart contract transaction set cannot exceed. The resource **budget** reflects those limits.
+The ledger has capacity limits. If transaction processing is allocated 1s (out
+of the total 5s of ledger closing time), and the max number of smart contract
+transactions per ledger is `X`, then the max compute time of each transaction
+is `1/X`, ignoring non-smart transactions and parallel execution. Furthermore,
+the ledger processing unit has a memory capacity, which the total amount of
+memory usage (host objects, linear memory) of executing the smart contract
+transaction set cannot exceed. The resource **budget** reflects those limits.
### Requirements
-The metered costs must align closely to the true costs of running a smart contract.
-- If metering underestimates the true costs, the ledger is susceptible to DoS attack. Underestimation also include where metering fails to properly consider exploitable edge cases whose true cost is significantly higher than the average.
-- If overestimate, the ledger fail to fully utilize its capacity.
+The metered costs must align closely to the true costs of running a smart
+contract.
+
+- If metering underestimates the true costs, the ledger is susceptible to DoS
+  attack. Underestimation also includes cases where metering fails to
+  properly consider exploitable edge cases whose true cost is significantly
+  higher than the average.
+- If it overestimates, the ledger fails to fully utilize its capacity.
In addition, metering must have:
-- High coverage: metering needs to cover all the non-trivial work done by the host.
-- Moderate overestimate: Metering needs to err on the side of worst case of the true cost, but should not be too far (within the same order of magnitude) from the average true cost.
+
+- High coverage: metering needs to cover all the non-trivial work done by the
+ host.
+- Moderate overestimate: Metering needs to err on the side of worst case of the
+ true cost, but should not be too far (within the same order of magnitude)
+ from the average true cost.
### Design goals
-- Simplicity – the metering model should be simple enough to understand. The cost composition should be easy to explain and reason about.
-- Extensibility and maintainability – should be straightforward to add metering to future code. Changes in the implementation should not require rewrite of metering. Every iteration of code changes should not require complete model re-calibration.
-- Efficiency – metering model should enable succinct implementation in the host that can be executed efficiently.
+
+- Simplicity – the metering model should be simple enough to understand. The
+ cost composition should be easy to explain and reason about.
+- Extensibility and maintainability – should be straightforward to add metering
+ to future code. Changes in the implementation should not require rewrite of
+ metering. Every iteration of code changes should not require complete model
+ re-calibration.
+- Efficiency – metering model should enable succinct implementation in the host
+ that can be executed efficiently.
### Goals alignment
-Aligns with the general goals of the overview [cap-0046](./cap-0046.md) as as well the fee model [cap-0047-07](./cap-0046-07.md).
+
+Aligns with the general goals of the overview [cap-0046](./cap-0046.md) as
+well as the fee model [cap-0046-07](./cap-0046-07.md).
## Abstract
-This specification starts by defining which resource metrics are chosen to reflect the cost of running a smart contract. It then formalizes a process for breaking down the host (or any arbitrary code) into components and defines steps for deriving the cost parameters for each component. It then introduces new ledger configuration entries for storing the budget and metering parameters, and finally, discusses relevant issues regarding maintaining and upgrading these parameters.
+This specification starts by defining which resource metrics are chosen to
+reflect the cost of running a smart contract. It then formalizes a process for
+breaking down the host (or any arbitrary code) into components and defines
+steps for deriving the cost parameters for each component. It then introduces
+new ledger configuration entries for storing the budget and metering
+parameters, and finally, discusses relevant issues regarding maintaining and
+upgrading these parameters.
-This CAP builds heavily on previous chapters of CAP-46. The reader needs to be comfortable with various concepts introduced in the CAP-46 (overview), 01 (the runtime environment), 03 (host functions), 09 (network configuration ledger entries). Familiarity with 05 (smart contract data) and 07 (fee model) is also beneficial.
+This CAP builds heavily on previous chapters of CAP-46. The reader needs to be
+comfortable with various concepts introduced in the CAP-46 (overview), 01 (the
+runtime environment), 03 (host functions), 09 (network configuration ledger
+entries). Familiarity with 05 (smart contract data) and 07 (fee model) is also
+beneficial.
## Specification
-Smart contract transactions on the ledger compete for 1. compute time 2. host memory. The two resource metrics we employ for budget and metering are **cpu instruction count** (`cpu_insns`) and **bytes of memory allocated** (`mem_bytes`).
+Smart contract transactions on the ledger compete for 1. compute time 2. host
+memory. The two resource metrics we employ for budget and metering are **cpu
+instruction count** (`cpu_insns`) and **bytes of memory allocated**
+(`mem_bytes`).
### Definitions
+
The entire host side code is broken down to a list of components and blocks:
-- A **component** is some code whose costs can be approximated with a linear or constant function of some input value derived from the code’s inputs.
-- A **block** is any code that cannot be measured this way, usually because it implements a complex or data-dependent algorithm.
+
+- A **component** is some code whose costs can be approximated with a linear or
+ constant function of some input value derived from the code’s inputs.
+- A **block** is any code that cannot be measured this way, usually because it
+ implements a complex or data-dependent algorithm.
Components and blocks may be wild or tame:
-- Code is **wild** if it’s code we didn’t write and are not maintaining a fork of.
+
+- Code is **wild** if it’s code we didn’t write and are not maintaining a fork
+ of.
- Code is **tame** if it’s code we wrote or are maintaining a fork of.
### Requirements for a component
-1. Can be modeled as a constant or linear function w.r.t. to a single input, on both resource types `cpu_insns` and `mem_bytes`.
-2. Does not invoke another component. I.e. components are the the leafs of a call tree.
+
+1. Can be modeled as a constant or linear function w.r.t. a single input, on
+ both resource types `cpu_insns` and `mem_bytes`.
+2. Does not invoke another component. I.e. components are the leafs of a call
+   tree.

### Call-tree invariant
-Consider the host code as a tree of called blocks and components (see figure 1), with the entrypoint at the root, blocks as interior nodes and components as leafs of the tree.
-We structure the host in such a way that ensures as an **invariant** that **every component in the call tree is metered on every path to it**. This is done by ensuring the following:
-- Blocks consist of only trivial (no need to meter) code, calls to components, and calls to other blocks.
-- Every piece of wild component is converted to a tame component, tracked by the cost model with a unique code number assigned to it.
-- Components are standalone and do not call other blocks or components — they are truly the leafs of the tree.
+Consider the host code as a tree of called blocks and components (see figure
+1), with the entrypoint at the root, blocks as interior nodes and components as
+leafs of the tree.
+
+We structure the host in such a way that ensures as an **invariant** that
+**every component in the call tree is metered on every path to it**. This is
+done by ensuring the following:
+
+- Blocks consist of only trivial (no need to meter) code, calls to components,
+ and calls to other blocks.
+- Every wild component is converted to a tame component, tracked by the cost
+  model with a unique code number assigned to it.
+- Components are standalone and do not call other blocks or components — they
+ are truly the leafs of the tree.
-The full list of component types are defined in `enum ContractCostType`, see [XDR changes](#xdr-changes).
+The full list of component types is defined in `enum ContractCostType`, see
+[XDR changes](#xdr-changes).
-Once the call-tree invariant is satisfied, we can ensure that if every single component is metered, the entire call-tree is metered.
+Once the call-tree invariant is satisfied, we can ensure that if every single
+component is metered, the entire call-tree is metered.
### Metering a component
-During runtime, whenever a component is hit, the meter is incremented by `y = ax + b`, where `x` is the component's input, `a` and `b` are the pre-fitted linear and constant parameter of that resource type. The metering happens independently for `cpu_insns` and `mem_bytes`, so there will be two sets of parameters for each component.
-To obtain the parameters, we isolate the component and set up a benchmark sandbox around it with profiling enabled (e.g. `perf_event` or `rusage`). We then call it repeatedly with varying input size, measure and record the resource output for each input size. Finally we fit a linear curve and extract the parameters.
+During runtime, whenever a component is hit, the meter is incremented by
+`y = ax + b`, where `x` is the component's input, `a` and `b` are the
+pre-fitted linear and constant parameters of that resource type. The metering
+happens independently for `cpu_insns` and `mem_bytes`, so there will be two
+sets of parameters for each component.
+
+To obtain the parameters, we isolate the component and set up a benchmark
+sandbox around it with profiling enabled (e.g. `perf_event` or `rusage`). We
+then call it repeatedly with varying input size, measure and record the
+resource output for each input size. Finally we fit a linear curve and extract
+the parameters.
### Cost parameters
-The result of calibration for per resource type is a set of `C` cost parameters containing `a` and `b` coefficients for each input type. All the cost parameters per resource type are stored in a `ConfigSettingEntry`.
+
+The result of calibration per resource type is a set of `C` cost parameters
+containing `a` and `b` coefficients for each input type. All the cost
+parameters per resource type are stored in a `ConfigSettingEntry`.
### The budget
-The budget for each resource type is a `ConfigSettingEntry` that is determined in consensus by the validators. The budget reflects the ledger processing capacity in accordance to the requirements in the [Requirements](#requirements) section. Minimum budget for `cpu_insns` budget is 2'500'000 and for `mem_bytes` is 2MB.
-At every metering charge, the cumulative resource consumption will be compared with the budget, and if exceeded, will result in a `SCEC_EXCEEDED_LIMIT` host error.
+The budget for each resource type is a `ConfigSettingEntry` that is determined
+in consensus by the validators. The budget reflects the ledger processing
+capacity in accordance with the requirements in the
+[Requirements](#requirements) section. The minimum budget for `cpu_insns` is
+2'500'000 and for `mem_bytes` is 2MB.
+
+At every metering charge, the cumulative resource consumption will be compared
+with the budget, and if exceeded, will result in a `SCEC_EXCEEDED_LIMIT` host
+error.
### XDR changes
-See [cap-0046 Overview](./cap-0046-01.md) and [Stellar-contract-config-setting.x](../contents/cap-0046/Stellar-contract-config-setting.x) for the XDR changes. In particular `ConfigSettingEntry`
-contains new entries for budget and metering. `ContractCostParamEntry` defines all cost component types with their explainations in the comment.
+
+See [cap-0046 Overview](./cap-0046-01.md) and
+[Stellar-contract-config-setting.x](../contents/cap-0046/Stellar-contract-config-setting.x)
+for the XDR changes. In particular `ConfigSettingEntry` contains new entries
+for budget and metering. `ContractCostParamEntry` defines all cost component
+types with their explanations in the comment.
### Metering an arbitrary new piece of code
-The main challenge of dealing with an arbitrary new piece of code (*wild* or *tame*) is to identify the components through an iterative process:
-1. Break down the code into a call tree where each node consists of meaningful, non-trivial operation.
-2. Identify the leaf nodes, making sure they are components according to the [requirements for a component](#requirements-for-a-component).
-3. For any *tame* component, meter it according to [metering a component](#metering-a-component)
-4. If it contains any *wild* code, follow [taming wild code](#taming-wild-code) to tame it. This step needs to be done in conjunction with 3.
-5. Start from the leaf nodes, mark them as metered, then proceed up level by level until the reaching root.
-If a node is composed of only metered children, it is a metered block. Once the root is metered, the call-tree invariant is satisfied and the entire call-tree is metered.
+The main challenge of dealing with an arbitrary new piece of code (_wild_ or
+_tame_) is to identify the components through an iterative process:
+
+1. Break down the code into a call tree where each node consists of a
+   meaningful, non-trivial operation.
+2. Identify the leaf nodes, making sure they are components according to the
+ [requirements for a component](#requirements-for-a-component).
+3. For any _tame_ component, meter it according to
+ [metering a component](#metering-a-component)
+4. If it contains any _wild_ code, follow [taming wild code](#taming-wild-code)
+ to tame it. This step needs to be done in conjunction with 3.
+5. Start from the leaf nodes, mark them as metered, then proceed up level by
+   level until reaching the root.
+
+If a node is composed of only metered children, it is a metered block. Once the
+root is metered, the call-tree invariant is satisfied and the entire call-tree
+is metered.
### Taming wild code
-As mentioned previously, one of the keys to satisfying the call-tree invariant is that all *wild* code, blocks or components, be tamed. A piece of *wild* code can appear in one of the following patterns:
+
+As mentioned previously, one of the keys to satisfying the call-tree invariant
+is that all _wild_ code, blocks or components, be tamed. A piece of _wild_ code
+can appear in one of the following patterns:
+
1. Consists of a single wild component (**WC**)
-2. A wild block (**WB**) that only consists of tamed blocks (**TB**s) and tamed components (**TC**s)
-3. A WB that consists of a mixture of TCs (recall a TB is just a combination of TCs) and WCs which we do not have access to.
+2. A wild block (**WB**) that only consists of tamed blocks (**TB**s) and tamed
+ components (**TC**s)
+3. A WB that consists of a mixture of TCs (recall a TB is just a combination of
+ TCs) and WCs which we do not have access to.
4. A WB that consist of several WCs
-For 1, we are calling a WC which is standalone and does not call us back. We can easily tame the WC by defining it as a metered component following [metering a component](#metering-a-component).
+For 1, we are calling a WC which is standalone and does not call us back. We
+can easily tame the WC by defining it as a metered component following
+[metering a component](#metering-a-component).
-For 2, metering is already covered by the tamed code and there is nothing else we need to do.
+For 2, metering is already covered by the tamed code and there is nothing else
+we need to do.
-For scenario 3, we first try to approximate the WB as pure wild code, i.e. by minimizing the footprint of TCs. Concretely this means during the calibration process, set up the samples (e.g. making `x = 0` in the linear function) such that the TBs have minimal effect on the output resource consumption. If this is possible, we end up in scenario 4. See figure 2 below for illustration.
+For scenario 3, we first try to approximate the WB as pure wild code, i.e. by
+minimizing the footprint of TCs. Concretely this means during the calibration
+process, set up the samples (e.g. making `x = 0` in the linear function) such
+that the TBs have minimal effect on the output resource consumption. If this is
+possible, we end up in scenario 4. See figure 2 below for illustration.
-For scenario 4, we first approximate the WB as a single WC, by picking a single dominant input and calibrate it as a linear function. If it works, we end up back to scenario 1 and we are done.
+For scenario 4, we first approximate the WB as a single WC, by picking a single
+dominant input and calibrate it as a linear function. If it works, we end up
+back to scenario 1 and we are done.
-If either 3 or 4 fails, then we have to tame it the brute force way either by forking the code and modifying it, or choosing a different library, or removing our dependency on it altogether.
+If either 3 or 4 fails, then we have to tame it the brute force way either by
+forking the code and modifying it, or choosing a different library, or removing
+our dependency on it altogether.

## Design Rationale
### Why `cpu_insns` metric
-We use cpu instruction count as the main metrics for "compute" because it is a direct proxy to process running time, i.e. `run_time = cpu_insns_count / clock_freq / ave_insns_per_cycle`.
-The average instructions per cycle `ave_insns_per_cycle` depends on a set of CPU architecture-specific factors such as the instruction set, instruction length, micro-ops, instruction-level parallelism (which depends on instruction window size, branch-prediction), which are stable per architecture.
-Assuming 2GHz cpu with an avg. insns per cycle of 2, 1ms roughly equals 4'000'000 cpu instructions.
+We use cpu instruction count as the main metric for "compute" because it is a
+direct proxy to process running time, i.e.
+`run_time = cpu_insns_count / clock_freq / ave_insns_per_cycle`. The average
+instructions per cycle `ave_insns_per_cycle` depends on a set of CPU
+architecture-specific factors such as the instruction set, instruction length,
+micro-ops, instruction-level parallelism (which depends on instruction window
+size, branch-prediction), which are stable per architecture.
+
+Assuming 2GHz cpu with an avg. insns per cycle of 2, 1ms roughly equals
+4'000'000 cpu instructions.
-Note that the instruction count may vary across architectures, but the metering model needs to be same, so the metering model needs to produce the upper bound on all viable architectures.
+Note that the instruction count may vary across architectures, but the
+metering model needs to be the same, so it needs to produce the upper bound on
+all viable architectures.
-Another considered alternative resource is execution time, which relates much closer to the actual cost in ledger closing time. However, execution time is much more volatile and less deterministic, which make it a less desirable target metric for metering.
+Another considered alternative resource is execution time, which relates much
+closer to the actual cost in ledger closing time. However, execution time is
+much more volatile and less deterministic, which makes it a less desirable
+target metric for metering.
### Why `mem_bytes` metric
-The bytes of memory allocated is a good proxy of the memory footprint of contract execution. The majority of the smart contract memory footprint comes from 1. a fixed-sized linear memory 2. immutable host objects created during contract execution, and both of these are not freed until the end of contract execution. This memory model is very similar to the arena allocator. Using allocated memory as the metric is an worst-case approximation that is 1. close to the actual memory cost 2. gives us flexibility to switch to an actual arena allocator later.
+
+The bytes of memory allocated is a good proxy of the memory footprint of
+contract execution. The majority of the smart contract memory footprint comes
+from 1. a fixed-sized linear memory 2. immutable host objects created during
+contract execution, and both of these are not freed until the end of contract
+execution. This memory model is very similar to the arena allocator. Using
+allocated memory as the metric is a worst-case approximation that is 1. close
+to the actual memory cost 2. gives us flexibility to switch to an actual arena
+allocator later.
### Why do we have to model the costs?
-In other words, why can't we measure and use the runtime resource consumption for metering? Because the profiling results are non-deterministic and 1. we can't use them for consensus 2. the contract execution outcome won't be able to be replayed bit-identically. Using an analytical model ensure determinism for consensus and replayability (more on this later).
+
+In other words, why can't we measure and use the runtime resource consumption
+for metering? Because the profiling results are non-deterministic and 1. we
+can't use them for consensus 2. the contract execution outcome won't be able to
+be replayed bit-identically. Using an analytical model ensures determinism for
+consensus and replayability (more on this later).
### Why linear and constant components only?
-Simplicity. We want the costs to follow a simple linear characteristic such that we can fit it accurately without needing a complex numerical model (and fitting process, heuristics etc).
-A model with higher order dependencies also risk the worst-case costs significantly outweighing the average, and any small deviation in the input resulting in significant over or underestimation of the costs. This goes against the [design goals](#design-goals).
+Simplicity. We want the costs to follow a simple linear characteristic such
+that we can fit it accurately without needing a complex numerical model (and
+fitting process, heuristics etc).
+
+A model with higher order dependencies also risks the worst-case costs
+significantly outweighing the average, and any small deviation in the input
+resulting in significant over or underestimation of the costs. This goes
+against the [design goals](#design-goals).
### Host vs Wasm vm
-This metering framework is generic and does not differentiate between the host and the Wasm vm. Both the host and the vm are treated as components and blocks defined in the [specification](#specification) section and subject to the same metering procedures.
-Our current choice of the Wasm virtual machine implementation is Wasmi, which is a lightweight interpreter of the wasm standard, written in the same language (Rust) as the host. Wasmi runs an inner interpreter loop that executes a single wasm instruction on each loop. Thus every wasm instruction logic fits the requirements of a component. `WasmInsnExec` in `ContractCostType` is designated for the wasm instructions.
+This metering framework is generic and does not differentiate between the host
+and the Wasm vm. Both the host and the vm are treated as components and blocks
+defined in the [specification](#specification) section and subject to the same
+metering procedures.
+
+Our current choice of the Wasm virtual machine implementation is Wasmi, which
+is a lightweight interpreter of the wasm standard, written in the same language
+(Rust) as the host. Wasmi runs an inner interpreter loop that executes a single
+wasm instruction on each loop. Thus every wasm instruction logic fits the
+requirements of a component. `WasmInsnExec` in `ContractCostType` is designated
+for the wasm instructions.
### Relation to cap-0046-07 (fee model)
-[CAP-0046-07](./cap-0046-07.md) proposed a fee model for smart contracts taking into account ledger access, storage and computation. This CAP details the computation aspect which includes cpu and memory. The metered `cpu_insns` goes into the fee model as input to the "compute" fee. While `mem_bytes` is not part of the fee model, it is subject to the network limit.
+[CAP-0046-07](./cap-0046-07.md) proposed a fee model for smart contracts taking
+into account ledger access, storage and computation. This CAP details the
+computation aspect which includes cpu and memory. The metered `cpu_insns` goes
+into the fee model as input to the "compute" fee. While `mem_bytes` is not part
+of the fee model, it is subject to the network limit.
### Cost estimation
-This proposal relies on the "preflight" mechanism to provide an estimation of the cpu and mem consumption in a transaction. These can only serve as guidance to the actual cost, since the ledger snapshot used for preflight may be outdated, as well as the actual logic during preflight and actual ("recording" vs "enforcing") modes may be different. Thus it is not guaranteed that a transaction staying below the budget during preflight will not exceed it during the actual run.
+
+This proposal relies on the "preflight" mechanism to provide an estimation of
+the cpu and mem consumption in a transaction. These can only serve as guidance
+to the actual cost, since the ledger snapshot used for preflight may be
+outdated, as well as the actual logic during preflight and actual ("recording"
+vs "enforcing") modes may be different. Thus it is not guaranteed that a
+transaction staying below the budget during preflight will not exceed it during
+the actual run.
### Config Settings Upgrade
-Both the budget and metering parameters are stored on the ledger as `ConfigSettingEntry` and their upgrade and validation process have been discussed in [CAP-0046-09](./cap-0046-09.md). In general, the settings can be upgraded with or without a protocol version upgrade.
-In the case of a protocol version upgrade, here are the scenarios that also require a settings upgrade:
-- New blocks have been introduced in the host that require introducing new components. Such changes include e.g. a new crypto primitive function. Note that if a new block merely consists of trivial code and calling existing components, then no settings upgrade is needed.
-- Changes on the host components, or version changes in its dependencies that result in observable difference in components' cost characteristics. In rare cases, if the cost characteristics becomes no longer linear, then the component needs to be broken down into finer sub-components. See [Taming wild code](#taming-wild-code).
+Both the budget and metering parameters are stored on the ledger as
+`ConfigSettingEntry` and their upgrade and validation process have been
+discussed in [CAP-0046-09](./cap-0046-09.md). In general, the settings can be
+upgraded with or without a protocol version upgrade.
+
+In the case of a protocol version upgrade, here are the scenarios that also
+require a settings upgrade:
+
+- New blocks have been introduced in the host that require introducing
+ new components. Such changes include e.g. a new crypto primitive function.
+ Note that if a new block merely consists of trivial code and calling existing
+ components, then no settings upgrade is needed.
+- Changes to the host components, or version changes in its dependencies, that
+  result in observable differences in components' cost characteristics. In
+  rare cases, if the cost characteristics are no longer linear, then the
+  component needs to be broken down into finer sub-components. See
+  [Taming wild code](#taming-wild-code).
### The “metered” stamp
-We may need to introduce a new mechanism for stamping the metered entities in the host, following the definitions of wild/tamed components/blocks outlined in previous section. Such a mechanism would help us ensuring the call-tree invariant is satisfied by examining the root block. A further mechanism to automatically detect if metering is missing on a path would be even more ideal.
-We will also need to introduce set of reviewing standards that differentiates between block vs component changes. A metered component is subject to significantly higher bars for review and audit, to make sure the component criteria are truly satisfied, as they are the foundational building blocks of the budget metering framework.
+We may need to introduce a new mechanism for stamping the metered entities in
+the host, following the definitions of wild/tamed components/blocks outlined in
+the previous section. Such a mechanism would help us ensure the call-tree
+invariant is satisfied by examining the root block. A further mechanism to
+automatically detect if metering is missing on a path would be even more ideal.
+
+We will also need to introduce a set of reviewing standards that differentiate
+between block vs component changes. A metered component is subject to
+significantly higher bars for review and audit, to make sure the component
+criteria are truly satisfied, as they are the foundational building blocks of
+the budget metering framework.
-In the future we may add tooling around ensuring metering coverage and assisting with updating parameters or adding new metered components.
+In the future we may add tooling around ensuring metering coverage and
+assisting with updating parameters or adding new metered components.
### Maintainability
-The cost parameters need to be maintained to prevent the metering model from gradually deviating away from reality (model drift). Even if we maintain the same host unchanged, the host's dependencies may change that result in small performance differences which can accumulate over time, causing the cost models to drift. To combat that, we will need to publish a set of specs where the metering calibration benchmark needs to be run regularly, along with a suite of tests and criteria for determining when the model parameters need to be updated.
+
+The cost parameters need to be maintained to prevent the metering model from
+gradually deviating away from reality (model drift). Even if we maintain the
+same host unchanged, the host's dependencies may change in ways that result in
+small performance differences which can accumulate over time, causing the cost
+models to drift. To combat that, we will need to publish a set of specs where
+the metering calibration benchmark needs to be run regularly, along with a
+suite of tests and criteria for determining when the model parameters need to
+be updated.
## Security Concerns
+
Missed or inaccurate metering can cause security concerns in two aspects:
-- **Denial of Service**: the computed costs significantly underestimate the true cost of running a contract, this can slowdown the validators and prevent them to close the ledger in an acceptable time frame.
-- **Under-Utilization of the Ledger Capacity**: this is not a direct attack per se. However, a side effect of overestimation in metering, is the ledger could be filled with many (deliberately crafted) fast contract transactions which theoretically could require more resource at the worst case, causing the ledger to be under-utilized. This may in turn cause other (important) transactions to queue up and not making into the ledger in a reasonable time.
+
+- **Denial of Service**: if the computed costs significantly underestimate the
+  true cost of running a contract, this can slow down the validators and
+  prevent them from closing the ledger in an acceptable time frame.
+- **Under-Utilization of the Ledger Capacity**: this is not a direct attack per
+  se. However, a side effect of overestimation in metering is that the ledger
+  could be filled with many (deliberately crafted) fast contract transactions
+  which theoretically could require more resources in the worst case, causing
+  the ledger to be under-utilized. This may in turn cause other (important)
+  transactions to not make it into the ledger in a reasonable time.
## Implementation
-Metering, budget and calibration has been implemented in the [soroban-env](https://github.com/stellar/rs-soroban-env). Related integration work (such as the config settings) have been done in stellar-core and [soroban-sdk](https://github.com/stellar/rs-soroban-sdk).
+
+Metering, budget and calibration have been implemented in the
+[soroban-env](https://github.com/stellar/rs-soroban-env). Related integration
+work (such as the config settings) have been done in stellar-core and
+[soroban-sdk](https://github.com/stellar/rs-soroban-sdk).
diff --git a/core/cap-0046-11.md b/core/cap-0046-11.md
index ba5edbce4..844a38bf6 100644
--- a/core/cap-0046-11.md
+++ b/core/cap-0046-11.md
@@ -15,52 +15,53 @@ Protocol version: 20
## Simple Summary
-This proposal describes the authorization framework built into the smart contract
-runtime environment.
+This proposal describes the authorization framework built into the smart
+contract runtime environment.
## Motivation
+
See the Soroban overview CAP for overall Soroban motivation.
Any smart contract that directly produces side effects (ledger modification and
events) requires some sort of authentication and authorization procedure.
-Without it, any party could produce any side effects, making the contract
-pointless.
+Without it, any party could produce any side effects, making the contract
+pointless.
A lot of the authorization tasks are common for almost every contract:
authentication, context verification, and signature replay protection are very
likely to be implemented in the same fashion. A common solution for these tasks
would be beneficial for the contract developers.
-On the other hand, bespoke authorization protocols require bespoke client
+On the other hand, bespoke authorization protocols require bespoke client
support, which makes it hard to present the information about what exactly is
-being authorized to the user.
+being authorized to the user.
Both of the above points serve as motivation for providing the authorization
framework that is built into the core protocol and is not just defined by SEP.
Built-in authorization framework has the following benefits:
-- Safety guarantees that are hard (or impossible) to achieve on the contract
-side (for example, advanced context tracking between cross-contract calls)
+- Safety guarantees that are hard (or impossible) to achieve on the contract
+ side (for example, advanced context tracking between cross-contract calls)
- Built-in contracts and authorized host functions can use exactly the same
-authorization standard as any other contract
+ authorization standard as any other contract
- Common procedures can be abstracted away and thus lower risk of contract
-developers missing an important authorization piece
-- Clients can rely on a single authorization standard that most of the contracts
-use
+ developers missing an important authorization piece
+- Clients can rely on a single authorization standard that most of the
+ contracts use
- Better performance thanks to using the native code and thus lower fees
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects
- to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
This CAP introduces the Soroban Authorization Framework. It consists of an
authorization host module that performs common authorization tasks, custom
-account standard and specification, and transaction-level support for
+account standard and specification, and transaction-level support for
authorization entries.
## Specification
@@ -76,7 +77,7 @@ This CAP uses several terms specific to the Soroban authorization framework.
#### Address
-This is a universal built-in identifier in Soroban. In XDR addresses are
+This is a universal built-in identifier in Soroban. In XDR addresses are
represented by `SCAddress` XDR that has two variants:
- `SC_ADDRESS_TYPE_ACCOUNT` for the Stellar account identifier (`AccountID`).
@@ -85,10 +86,10 @@ represented by `SCAddress` XDR that has two variants:
#### Account
Account in the context of Soroban Authorization is any address that performs
-authorization (not to confuse with the classic Stellar `AccountID`). For example,
-in case if payment is performed from address A to address B, address A needs to
-authorize the payment. Thus we can say 'account A authorizes the payment to
-address B'.
+authorization (not to be confused with the classic Stellar `AccountID`). For
+example, if a payment is performed from address A to address B, address A
+needs to authorize the payment. Thus we can say 'account A authorizes the
+payment to address B'.
#### Custom account
@@ -99,22 +100,23 @@ is provided by the arbitrary smart contract logic.
### Components
-On a high level, the authorization framework comprises the following components:
+On a high level, the authorization framework comprises the following
+components:
- Transaction-level definitions of authorization entries. These allow users to
-provide authorization for arbitrary trees of the contract calls on behalf of
-arbitrary accounts using the standardized data structure.
+ provide authorization for arbitrary trees of the contract calls on behalf of
+ arbitrary accounts using the standardized data structure.
- Host functions that allow contracts to interact with host's authorization
-module.
-- Authorization module of Soroban host. It consumes the transaction-level
-authorization data, performs authentication, provides replay protection, and
-enforces the authorization context.
+ module.
+- Authorization module of Soroban host. It consumes the transaction-level
+ authorization data, performs authentication, provides replay protection, and
+ enforces the authorization context.
In the following sections each of the components is described in detail.
### Authorization Payload in Transaction
-`InvokeHostFunctionOp` contains an arbitrarily sized array of
+`InvokeHostFunctionOp` contains an arbitrarily sized array of
`SorobanAuthorizationEntry` alongside the host function.
Every `SorobanAuthorizationEntry` contains a single tree of contract calls
@@ -123,97 +125,100 @@ contain the necessary information for the account to authorize the tree.
`SorobanAuthorizedInvocation` only contains the contract calls that require
authorization. More details on how this tree is matched to the actual contract
-calls are provided in the [following section](#authorization-algorithm-for-accounts).
-Every node in `SorobanAuthorizedInvocation` tree contains an authorized function
-specification and an arbitrary number `subInvocations` that specify the
+calls are provided in the
+[following section](#authorization-algorithm-for-accounts). Every node in
+`SorobanAuthorizedInvocation` tree contains an authorized function
+specification and an arbitrary number of `subInvocations` that specify the
authorized call trees spawned from the `function`.
`SorobanAuthorizedFunction` allows authorizing the contract invocations with
`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN` and contract creation operations
with `SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_HOST_FN`. Specification
-for both is defined by the respective XDR structures used in
+for both is defined by the respective XDR structures used in
`InvokeHostFunctionOp`: `InvokeContractArgs` for the contract invocation and
`CreateContractArgs` for the contract creation.
-`SorobanCredentials` are a union with 2 supported values:
-`SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` with no additional payload and
+`SorobanCredentials` are a union with 2 supported values:
+`SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` with no additional payload and
`SOROBAN_CREDENTIALS_ADDRESS` with `SorobanAddressCredentials` payload.
-`SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` identifies that the transaction source
-account authorizes the corresponding call tree. Since Soroban authorization data
-is signed by the source account as a part of the transaction, the source account
-can authorize all `SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` entries at once without
-any additional signatures or replay protection.
+`SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` identifies that the transaction source
+account authorizes the corresponding call tree. Since Soroban authorization
+data is signed by the source account as a part of the transaction, the source
+account can authorize all `SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` entries at once
+without any additional signatures or replay protection.
`SorobanAddressCredentials` includes the account address, nonce, signature
expiration ledger and the signature itself. Signature is only considered valid
-until the signature expiration ledger (inclusive). Nonce is an arbitrary `int64`
-number used for replay prevention. Nonce can't be reused until the signature
-expires. More details about the credentials [authentication](#authentication)
-and [nonce consumption](#nonce-consumption) are provided in the following
-sections.
+until the signature expiration ledger (inclusive). Nonce is an arbitrary
+`int64` number used for replay prevention. Nonce can't be reused until the
+signature expires. More details about the credentials
+[authentication](#authentication) and [nonce consumption](#nonce-consumption)
+are provided in the following sections.
#### Authorization Payload is Standalone
-The specification described above is not associated with transaction in any way.
-This allows decoupling transaction source from the Soroban operation actors, so
-that they can only sign the Soroban authorization payload. The only exception
-are entries with `SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` that infer the authorizer
-credentials from the transaction source account (the entry itself doesn't have
-any explicit identifiers connecting it to the transaction though).
+The specification described above is not associated with transaction in any
+way. This allows decoupling transaction source from the Soroban operation
+actors, so that they can only sign the Soroban authorization payload. The only
+exception are entries with `SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` that infer the
+authorizer credentials from the transaction source account (the entry itself
+doesn't have any explicit identifiers connecting it to the transaction though).
This puts a restriction on the authorization enforcement mechanism: the entries
have to only be used (i.e. consume nonces) when they are actually being used.
-This way frontrunning someone else's authorization entry becomes a no-op, unless
-exactly the same operation is performed, which is similar to just submitting a
-signed transaction for someone.
+This way frontrunning someone else's authorization entry becomes a no-op,
+unless exactly the same operation is performed, which is similar to just
+submitting a signed transaction for someone.
#### Soroban Authorization Signature Payload
-The authentication of `SorobanAddressCredentials` is performed by verifying the
+The authentication of `SorobanAddressCredentials` is performed by verifying the
signature (or signatures) of a SHA-256 hash of a protocol-defined payload in an
account-specific way. The common payload is defined as
-`ENVELOPE_TYPE_SOROBAN_AUTHORIZATION` variant of `HashIDPreimage`. The
+`ENVELOPE_TYPE_SOROBAN_AUTHORIZATION` variant of `HashIDPreimage`. The
`sorobanAuthorization` envelope has to be filled with information corresponding
to the respective `SorobanAuthorizationEntry` and `networkID`. `networkID` is a
-SHA-256 hash of the of the network name. `nonce` and `signatureExpirationLedger`
-fields have to match the respective fields in `SorobanAddressCredentials`.
-`invocation` field has to match `rootInvocation` of the `SorobanAuthorizationEntry`.
+SHA-256 hash of the network name. `nonce` and
+`signatureExpirationLedger` fields have to match the respective fields in
+`SorobanAddressCredentials`. `invocation` field has to match `rootInvocation`
+of the `SorobanAuthorizationEntry`.
### Authorization Host Functions
-There are two similar host functions for smart contracts to use in order to
+There are two similar host functions for smart contracts to use in order to
require authorization from an address:
- `require_auth_for_args(address: AddressObject, args: VecObject)` - require
-address to have authorized call of the current contract function with provided
-arguments.
-- `require_auth(address: AddressObject)` - require address to have authorized
-call of the current contract function with all the arguments it has been called
-with.
+ address to have authorized call of the current contract function with
+ provided arguments.
+- `require_auth(address: AddressObject)` - require address to have authorized
+ call of the current contract function with all the arguments it has been
+ called with.
`require_auth` and `require_auth_for_args` have equivalent functionality with
the only difference being the automatic argument inference for `require_auth`.
Going forward we refer to both as `require_auth`.
-'Requiring authorization' means that a respective entry
-`SorobanAuthorizedFunction` has to be present in an authenticated
-`SorobanAuthorizationEntry` corresponding to the `address`. Contract address
-and function name are inferred automatically from the current contract's address
-and the _entry point_ contract function, i.e. the function that has been invoked
-through the host. Internal function calls that are done with Wasm are not
-considered for the authorization purposes.
+'Requiring authorization' means that a respective entry
+`SorobanAuthorizedFunction` has to be present in an authenticated
+`SorobanAuthorizationEntry` corresponding to the `address`. Contract address
+and function name are inferred automatically from the current contract's
+address and the _entry point_ contract function, i.e. the function that has
+been invoked through the host. Internal function calls that are done with Wasm
+are not considered for the authorization purposes.
-Another authorization-related host function is
+Another authorization-related host function is
`authorize_as_curr_contract(auth_entries: VecObject)`. This function authorizes
-the subcontract calls made on behalf of the current contract from the next (and
+the subcontract calls made on behalf of the current contract from the next (and
only next) contract call that current contract performs. More specifically, if
-the current contract A calls function `f` of the contract `B`, then
+the current contract A calls function `f` of the contract `B`, then
`authorize_as_curr_contract` allows A to specify authorizations on calls that
`B.f` performs. Any authorization `B.f` itself performs on behalf of `A` is
-implicitly successful (more details in the [following section](#authorization-algorithm-for-invoker-contracts)).
-This function expects a vector of `InvokerContractAuthEntry` Soroban contract
-types defined as follows:
+implicitly successful (more details in the
+[following section](#authorization-algorithm-for-invoker-contracts)). This
+function expects a vector of `InvokerContractAuthEntry` Soroban contract types
+defined as follows:
```rust
#[contracttype]
@@ -239,114 +244,116 @@ pub struct CreateContractHostFnContext {
}
```
-`InvokerContractAuthEntry` has the same semantics as `SorobanAuthorizedFunction`
-XDR, but expressed with contract types.
+`InvokerContractAuthEntry` has the same semantics as
+`SorobanAuthorizedFunction` XDR, but expressed with contract types.
### Soroban Host Authorization Module
-All the authorization logic is implemented in the Soroban host and we refer to
+All the authorization logic is implemented in the Soroban host and we refer to
it as 'authorization module' in this CAP.
-Soroban host is initialized with all the `SorobanAuthorizationEntry`
-entries from the input `InvokeHostFunctionOp` operation. These entries are
-validated structurally (i.e. host ensures that XDR is valid and that function
-arguments are valid host values), but not semantically, i.e. no
-authorization-related validations happen at this point.
+Soroban host is initialized with all the `SorobanAuthorizationEntry` entries
+from the input `InvokeHostFunctionOp` operation. These entries are validated
+structurally (i.e. host ensures that XDR is valid and that function arguments
+are valid host values), but not semantically, i.e. no authorization-related
+validations happen at this point.
There are two distinct authorization scenarios: authorization for the accounts
-via `SorobanAuthorizationEntry` transaction payload and authorization of invoker
-contracts. Invoker contract authorization is attempted before proceeding to more
-general account authorization flow. However, we start with specifying the account
-authorization algorithm, as the invoker contract authorization is very similar
-and builds on top of that algorithm.
+via `SorobanAuthorizationEntry` transaction payload and authorization of
+invoker contracts. Invoker contract authorization is attempted before
+proceeding to more general account authorization flow. However, we start with
+specifying the account authorization algorithm, as the invoker contract
+authorization is very similar and builds on top of that algorithm.
#### Authorization Algorithm for Accounts
-Whenever a contract calls `require_auth` for an account, host iterates over every
-non-exhausted `SorobanAuthorizationEntry` where credentials match the
+Whenever a contract calls `require_auth` for an account, host iterates over
+every non-exhausted `SorobanAuthorizationEntry` where credentials match the
account's address. The address is just compared to `SorobanAddressCredentials`
and transaction source account is used for comparison of
-`SOROBAN_CREDENTIALS_SOURCE_ACCOUNT`. Matching happens in exactly the same order
-as specified in transaction.
+`SOROBAN_CREDENTIALS_SOURCE_ACCOUNT`. Matching happens in exactly the same
+order as specified in transaction.
`create_contract` host function pushes a special stack frame identifying that
'create contract' host function is running and implicitly calls a special
- implementation of `require_auth(deployer)` with the arguments being
+implementation of `require_auth(deployer)` with the arguments being
`CreateContractArgs` inferred from the `create_contract` arguments. The same
procedure is also applied to the `InvokeHostFunctionOp` operations with
`HOST_FUNCTION_TYPE_CREATE_CONTRACT` host function variant.
-For every entry with the matching address host proceeds to matching current
-authorized invocation in `SorobanAuthorizationEntry` to the current
-`require_auth` request. The matching algorithm is as follows (the details of
+For every entry with the matching address host proceeds to matching current
+authorized invocation in `SorobanAuthorizationEntry` to the current
+`require_auth` request. The matching algorithm is as follows (the details of
sub-operations are specified in the sub-sections):
-- In case if entry hasn't been matched yet, try matching the root
-`SorobanAuthorizedFunction` of the entry to the current invocation. If the entry
-matches, perform authentication and consume nonce. Trap contract in case of
-authentication failure or incorrect nonce. Mark the root node as currently
-matched node and take a note of the host invocation frame where the root node was
-matched.
- - This case is not executed when there is at least one non-exhausted
- `SorobanAuthorizationEntry` with a currently matched node.
+- In case if entry hasn't been matched yet, try matching the root
+ `SorobanAuthorizedFunction` of the entry to the current invocation. If the
+ entry matches, perform authentication and consume nonce. Trap contract in
+  case of authentication failure or incorrect nonce. Mark the root node as
+  currently matched node and take a note of the host invocation frame where the root node was matched.
+  - This case is not executed when there is at least one non-exhausted
+    `SorobanAuthorizationEntry` with a currently matched node.
- In case if entry has a currently matched node in the invocation tree, try
-matching every `SorobanAuthorizedFunction` in its `subInvocations`. In case of
-a match, mark the first match in iteration order as currently matched node.
-- Every time host pops an invocation stack frame, mark all the
-`SorobanAuthorizationEntry` entries that have a root node matched within that
-frame as exhausted.
+ matching every `SorobanAuthorizedFunction` in its `subInvocations`. In case
+ of a match, mark the first match in iteration order as currently matched
+ node.
+- Every time host pops an invocation stack frame, mark all the
+ `SorobanAuthorizationEntry` entries that have a root node matched within that
+ frame as exhausted.
##### `SorobanAuthorizedFunction` matching
-The `SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN` variant is considered
+The `SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN` variant is considered
succesfully matched when all of the following is true:
- `contractAddress` equals to the address of the currently running contract
-- `functionName` equals to the name of the entry point function of the currently
-running contract
+- `functionName` equals to the name of the entry point function of the
+ currently running contract
- `args` are equal to arguments of the currently entry point contract function
-(in case of `require_auth`) or `args` passed to `require_auth_for_args`.
+ (in case of `require_auth`) or `args` passed to `require_auth_for_args`.
All the equality comparisons must be equivalent to comparing the binary
representation of XDR of every one of these fields.
-`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_HOST_FN` variant is considered
-succesfully matched when `CreateContractArgs` are equal between the
+`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_HOST_FN` variant is
+considered successfully matched when `CreateContractArgs` are equal between the
authorization entry and the contract creation operation.
-In case of `HOST_FUNCTION_TYPE_CREATE_CONTRACT` operation two
-`CreateContractArgs` XDR structs are directly compared.
+In case of `HOST_FUNCTION_TYPE_CREATE_CONTRACT` operation two
+`CreateContractArgs` XDR structs are directly compared.
-In case of `create_contract` host function `CreateContractArgs` are built as
+In case of `create_contract` host function `CreateContractArgs` are built as
follows:
+
- `contractIDPreimage` is set to `CONTRACT_ID_PREIMAGE_FROM_ADDRESS` variant,
-where `address` is the `deployer` passed to the host function and `salt` is
-`salt` value passed to the host function.
+ where `address` is the `deployer` passed to the host function and `salt` is
+ `salt` value passed to the host function.
- `executable` is set to `CONTRACT_EXECUTABLE_WASM` variant, where `hash` value
-is `wasm_hash` value passed to the host function.
+ is `wasm_hash` value passed to the host function.
##### Authentication
-There are 3 different supported approaches to authentication, depending on
+There are 3 different supported approaches to authentication, depending on
`credentials` in `SorobanAuthorizationEntry`:
- For `SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` authentication is automatically
-considered successful, as host trusts the Core to authenticate the transaction
-source account.
+ considered successful, as host trusts the Core to authenticate the
+ transaction source account.
- For `SOROBAN_CREDENTIALS_ADDRESS` with `SC_ADDRESS_TYPE_ACCOUNT` address
-authenticate operation using the standard Stellar account authentication
-procedure. Medium signature threshold has to be reached. Account sequence number
-is not increased as Soroban nonce is used for replay prevention.
+ authenticate operation using the standard Stellar account authentication
+ procedure. Medium signature threshold has to be reached. Account sequence
+ number is not increased as Soroban nonce is used for replay prevention.
- For `SOROBAN_CREDENTIALS_ADDRESS` with `SC_ADDRESS_TYPE_CONTRACT` address
-authentication is delegated to the custom account contract with the corresponding
-address using the reserved `__check_auth` function. If `check_auth` traps or
-returns an error, the authentication is considered failed.
+ authentication is delegated to the custom account contract with the
+ corresponding address using the reserved `__check_auth` function. If
+ `check_auth` traps or returns an error, the authentication is considered
+ failed.
###### Stellar Account Authentication
In case of `SC_ADDRESS_TYPE_ACCOUNT` address credentials the value of the
-`signature` `SCVal` is supposed to be the `SCVal::Vec` of the following contract
-type structures:
+`signature` `SCVal` is supposed to be the `SCVal::Vec` of the following
+contract type structures:
```rust
#[contracttype]
@@ -359,16 +366,16 @@ pub struct AccountEd25519Signature {
The vector has to be sorted in increasing order of public keys and contain no
duplicate public keys. Every public key has to be a valid signer of the Stellar
account being authenticated and every signature has to be a valid signature of
-SHA-256 of the expected [signature payload](#soroban-authorization-signature-payload)
-specified above. The total weight of the signatures has to be equal to or
-greater than the medium threshold of the Stellar account. The maximum allowed
-amount of signatures is 20 (consistently with the maximum number of transaction
-signers).
+SHA-256 of the expected
+[signature payload](#soroban-authorization-signature-payload) specified above.
+The total weight of the signatures has to be equal to or greater than the
+medium threshold of the Stellar account. The maximum allowed amount of
+signatures is 20 (consistently with the maximum number of transaction signers).
###### Custom Account Authentication
-In case of `SC_ADDRESS_TYPE_CONTRACT` address credentials the value of the
-`signature` `SCVal` is arbitrary value that a custom account contract can
+In case of `SC_ADDRESS_TYPE_CONTRACT` address credentials the value of the
+`signature` `SCVal` is arbitrary value that a custom account contract can
interpret.
Any contract that implements the special reserved `__check_auth` function is
@@ -384,7 +391,7 @@ fn __check_auth(
) -> Result<(), Error>;
```
-If `__check_auth` doesn't return `()` (i.e. returns error or traps),
+If `__check_auth` doesn't return `()` (i.e. returns error or traps),
authentication is considered failed.
`Context` provides information about the context in which `require_auth` has
@@ -417,121 +424,127 @@ Note, that the `ContractContext` and `ContractAuthorizationContext` structure
definitions are exactly the same as the structures defined for
`authorize_as_curr_contract` function.
-`auth_contexts` vector is produced via pre-order depth-first search of the
-`rootInvocation` in `SorobanAuthorizationEntry` that is being authenticated. The
-`subInvocations` are iterated in the same order as specified in authorization
-entry. For example, if an entry has the following invocation tree structure:
-`A->[B->[D, E], C->[F->[G]]` (where every letter corresponds to
+`auth_contexts` vector is produced via pre-order depth-first search of the
+`rootInvocation` in `SorobanAuthorizationEntry` that is being authenticated.
+The `subInvocations` are iterated in the same order as specified in
+authorization entry. For example, if an entry has the following invocation tree
+structure: `A->[B->[D, E], C->[F->[G]]` (where every letter corresponds to
`SorobanAuthorizedFunction` and `subInvocations` are listed inside `[]`), then
-the `auth_contexts` will contain functions in the following order: `A,B,D,E,C,F,G`.
+the `auth_contexts` will contain functions in the following order:
+`A,B,D,E,C,F,G`.
`Context` directly corresponds to the `SorobanAuthorizedFunction`.
-`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN` is converted to `Context::Contract`.
-`InvokeContractArgs` are directly converted to `ContractContext`:
-`contractAddress` is written to the `contract` field, `functionName` to `fn_name`,
-and `args` vector of `SCVal` to `args` vector of `Val`.
-
-`HOST_FUNCTION_TYPE_CREATE_CONTRACT` is converted to `Context::CreateContractHostFn`.
-`CreateContractArgs` are converted to `CreateContractHostFnContext` with omission
-of some implied fields (such as deployer address from `CONTRACT_ID_PREIMAGE_FROM_ADDRESS`,
-that matches the address of the account performing authentication due to the
-matching algorithm). `CreateContractArgs` executable is written to
-`CreateContractHostFnContext` executable. Token executable
-causes authentication to fail (as built-in token contract creation doesn't need
+`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN` is converted to
+`Context::Contract`. `InvokeContractArgs` are directly converted to
+`ContractContext`: `contractAddress` is written to the `contract` field,
+`functionName` to `fn_name`, and `args` vector of `SCVal` to `args` vector of
+`Val`.
+
+`HOST_FUNCTION_TYPE_CREATE_CONTRACT` is converted to
+`Context::CreateContractHostFn`. `CreateContractArgs` are converted to
+`CreateContractHostFnContext` with omission of some implied fields (such as
+deployer address from `CONTRACT_ID_PREIMAGE_FROM_ADDRESS`, that matches the
+address of the account performing authentication due to the matching
+algorithm). `CreateContractArgs` executable is written to
+`CreateContractHostFnContext` executable. Token executable causes
+authentication to fail (as built-in token contract creation doesn't need
authorization). `contractIDPreimage.fromAddress.salt` is written to the `salt`
-field of the struct. Entries with `CONTRACT_ID_PREIMAGE_FROM_ASSET`
+field of the struct. Entries with `CONTRACT_ID_PREIMAGE_FROM_ASSET`
`contractIDPreimage` are not allowed and will cause authentication to fail.
###### Self-reentrancy for Custom Accounts
The custom account authentication is the only case of contract invocation with
-self-reentrancy allowed. More specifically, the contract with address A can call
-`require_auth(A)`, which would result in calling `A.__check_auth` via the host
-invocation. Only self-reentrancy is allowed, i.e. re-entering any other contract
-than A in the example is not still not allowed.
+self-reentrancy allowed. More specifically, the contract with address A can
+call `require_auth(A)`, which would result in calling `A.__check_auth` via the
+host invocation. Only self-reentrancy is allowed, i.e. re-entering any other
+contract than A in the example is still not allowed.
##### Signature Expiration Verification
Before trying to consume the nonce, host makes sure that the signature
-expiration ledger is valid.
-
-- If the signature `expirationLedger` value is
-strictly less than the current ledger sequence number, then the signature is
-considered expired and authorization fails.
-- If the signature `expirationLedger` value is strictly greater than maximum
-allowed expiration ledger, signature is considered to be too early and
-authorization fails. Maximum allowed expiration ledger is determined by the
-network configuration and computed as
-`currentLedgerSequence + stateArchivalSettings.maxEntryTTL - 1`, where
-`stateArchivalSettings` is the value from `CONFIG_SETTING_STATE_ARCHIVAL`
-`ConfigSettingEntry`.
+expiration ledger is valid.
+
+- If the signature `expirationLedger` value is strictly less than the current
+ ledger sequence number, then the signature is considered expired and
+ authorization fails.
+- If the signature `expirationLedger` value is strictly greater than maximum
+ allowed expiration ledger, signature is considered to be too early and
+ authorization fails. Maximum allowed expiration ledger is determined by the
+ network configuration and computed as
+ `currentLedgerSequence + stateArchivalSettings.maxEntryTTL - 1`, where
+ `stateArchivalSettings` is the value from `CONFIG_SETTING_STATE_ARCHIVAL`
+ `ConfigSettingEntry`.
##### Nonce Consumption
-Nonce is consumed only for the authorization entries that have the root
+Nonce is consumed only for the authorization entries that have the root
invocation matched to an actual call, have passed authentication and don't have
-an expired signature. As mentioned in the authorization algorithm specification,
-nonce is not consumed (and not provided) for `SOROBAN_CREDENTIALS_SOURCE_ACCOUNT`
-credentials.
+an expired signature. As mentioned in the authorization algorithm
+specification, nonce is not consumed (and not provided) for
+`SOROBAN_CREDENTIALS_SOURCE_ACCOUNT` credentials.
-Nonces are stored in `CONTRACT_DATA` `LedgerEntry` with a special format. The
-contract data entry for a given nonce value N for address A is built in the
+Nonces are stored in `CONTRACT_DATA` `LedgerEntry` with a special format. The
+contract data entry for a given nonce value N for address A is built in the
following fashion:
+
- `contract` address is set to A (this includes the Stellar account addresses)
-- `key` is set to the special `SCV_LEDGER_KEY_NONCE` variant of `SCVal`.
-`nonceKey` payload of the value is set to N. Only authorization module is
-allowed to create contract data entries with this key.
+- `key` is set to the special `SCV_LEDGER_KEY_NONCE` variant of `SCVal`.
+ `nonceKey` payload of the value is set to N. Only authorization module is
+ allowed to create contract data entries with this key.
- `durability` is set to `TEMPORARY`
- `body` is set to `DATA_ENTRY` with `data.val` set to `SCV_VOID` `SCVal` and
-`data.flags` are `0`.
+ `data.flags` are `0`.
- `liveUntilLedgerSeq` is set to the `signatureExpirationLedger` from the
-credentials.
+ credentials.
-Before trying to consume the nonce host checks if it already exists in the
+Before trying to consume the nonce host checks if it already exists in the
ledger. If it does, then the signature is potentially replayed and thus the
authentication check fails.
-If nonce doesn't exist in the ledger yet, host creates the entry as per
+If nonce doesn't exist in the ledger yet, host creates the entry as per
speicifcation and writes it to the ledger.
Due to the signature expiration verification procedure the nonce entry is
guaranteed to stay in the ledger until the signature expires. After the nonce
-entry (and thus the signature) expires, nonce value can be reused for in the new
-`SorobanAddressCredentials` entry for the account with a new expiration ledger.
+entry (and thus the signature) expires, nonce value can be reused in the
+new `SorobanAddressCredentials` entry for the account with a new expiration
+ledger.
##### Important Propeties of the Algorithm
The authorization algorithm specified above has the following important
properties:
-- There is no requirement for the root invocation of `SorobanAuthorizationEntry`
-to match the root invocation of `InvokeHostFunctionOp`. This allows users to
-bundle the signed operations in non-atomic fashion via a custom contract without
-requiring any additional authorization. Addresses also don't have to be unique
-in such scenario.
+- There is no requirement for the root invocation of
+ `SorobanAuthorizationEntry` to match the root invocation of
+ `InvokeHostFunctionOp`. This allows users to bundle the signed operations in
+ non-atomic fashion via a custom contract without requiring any additional
+ authorization. Addresses also don't have to be unique in such scenario.
- Contract invocations that don't call `require_auth` are ignored by the
-algorithm, which makes router-type contracts that just pass the
-invocation through without doing any writes transparent for the signer.
-- It is possible to have multiple authorized trees for the same address with the
-root in the same stack frame. In such case the inner nodes can be interchanged
-between trees while still satisfying the algorithm. For example, if
-contract A calls `require_auth` twice, then calls B and C both of which call
-`require_auth`, the following combinations of `SorobabAuthorizationEntry`
-invocations will pass authorization algorithm: `A->[B, C], A`, `A->B, A->C`,
-`A->C, A->B`, `A, A->[B, C]`. Note, that sequencing the calls changes the
-requirements, for example if A calls `require_auth` right before calling B and C,
-only the following combinations would pass: `A->B, A->C`, `A->[B, C], A`.
-- Authorized call trees can't be broken down into separate
-`SorobanAuthorizationEntry` entries (unless there are multiple valid trees, as
-described above). This means that if contract A calls contract B and both require
-authorization, the signature for 'A invokes B' has to be provided. Signatures
-for 'only A' and 'only B' won't be matched by the algorithm. This makes it hard
-to create a set of disjoint frontrunnable signatures.
+ algorithm, which makes router-type contracts that just pass the invocation
+ through without doing any writes transparent for the signer.
+- It is possible to have multiple authorized trees for the same address with
+ the root in the same stack frame. In such case the inner nodes can be
+ interchanged between trees while still satisfying the algorithm. For example,
+ if contract A calls `require_auth` twice, then calls B and C both of which
+ call `require_auth`, the following combinations of
+  `SorobanAuthorizationEntry` invocations will pass authorization algorithm:
+ `A->[B, C], A`, `A->B, A->C`, `A->C, A->B`, `A, A->[B, C]`. Note, that
+ sequencing the calls changes the requirements, for example if A calls
+ `require_auth` right before calling B and C, only the following combinations
+ would pass: `A->B, A->C`, `A->[B, C], A`.
+- Authorized call trees can't be broken down into separate
+ `SorobanAuthorizationEntry` entries (unless there are multiple valid trees,
+ as described above). This means that if contract A calls contract B and both
+ require authorization, the signature for 'A invokes B' has to be provided.
+ Signatures for 'only A' and 'only B' won't be matched by the algorithm. This
+ makes it hard to create a set of disjoint frontrunnable signatures.
#### Authorization Algorithm for Invoker Contracts
-Whenver a contract calls `require_auth` host verifies if the address for which
+Whenever a contract calls `require_auth` host verifies if the address for which
authorization is required is the invoker contract, i.e. if its address is an
address of the contract that invoked the current contract. If that's the case,
the call immediately succeeds.
@@ -539,17 +552,17 @@ the call immediately succeeds.
In case if authorized address is not the direct invoker, it still might have
indirectly authorized the call using `authorize_as_curr_contract` host function
specified in the [respective section](#authorization-host-functions). Thus host
-executes the same algorithm as the one described in the section above with the
+executes the same algorithm as the one described in the section above with the
following differences:
-- Instead of `SorobanAuthorizationEntry` entries, operate on
-`InvokerContractAuthEntry` structures.
+- Instead of `SorobanAuthorizationEntry` entries, operate on
+ `InvokerContractAuthEntry` structures.
- Don't perform any additional authentication or replay prevention. Host stores
-`InvokerContractAuthEntry` attributed to the currently running contract and
-thus these can be considered authentic in later calls.
+ `InvokerContractAuthEntry` attributed to the currently running contract and
+ thus these can be considered authentic in later calls.
-All the relevant properties of the account-specific algorithm also apply to
-the invoker contract algorithm.
+All the relevant properties of the account-specific algorithm also apply to the
+invoker contract algorithm.
##### Invoker Contracts vs Custom Account Contracts Prioritization
@@ -558,48 +571,53 @@ time. Following the specified algorithms, the priority is always given to the
invoker contract auhtorization. For example, if contract A invokes contract B
and contract B calls `require_auth(A)`, then the call will immediately succeed
because A is invoker of B. Thus if there is a `SorobanAuthorizationEntry`
-payload for contract A address with a matching invocation of B contract, it will
-stay non-matched and non-exhausted.
+payload for contract A address with a matching invocation of B contract, it
+will stay non-matched and non-exhausted.
## Resource Utilization
-As mentioned in the Motivation section, authorization is necessary for most
-of the smart contracts, so moving the implementation to the compiled code (as
+As mentioned in the Motivation section, authorization is necessary for most of
+the smart contracts, so moving the implementation to the compiled code (as
opposed to Wasm) should on average reduce the resource utilization compared to
the contracts that use Wasm-based authorization approach.
Granular nonces consume more ledger space than autoincrement nonces, however
-they are evicted from the ledger after expiration as any other temporary entries.
-Thus given short enough signature expirations these shouldn't cause significant
-ledger bloat and in the end may be more efficient than persistent autoincrement
-entries.
+they are evicted from the ledger after expiration as any other temporary
+entries. Thus given short enough signature expirations these shouldn't cause
+significant ledger bloat and in the end may be more efficient than persistent
+autoincrement entries.
## Security Concerns
-Authorization framework built into protocol introduces a single potential common
-point of failure for all the contracts that are using it. However, it can can be
-evaluated and reviewed much more than an average contract, thus likely
-reducing the vulnerability probability compared to the manual implementation.
-Contracts are also not in any way forced to use the authorization framework
-beyond the interactions with the built-in token contract.
+Authorization framework built into protocol introduces a single potential
+common point of failure for all the contracts that are using it. However, it
+can be evaluated and reviewed much more than an average contract, thus
+likely reducing the vulnerability probability compared to the manual
+implementation. Contracts are also not in any way forced to use the
+authorization framework beyond the interactions with the built-in token
+contract.
There are no specific security concerns from the protocol standpoint. As with
any piece of Soroban host, most of the potential issues are isolated to the
scope of the invoked contract.
Built-in token contract uses authorization framework as well, and there is a
-potential attack surface for unauthorized access to the trustlines and Stellar
+potential attack surface for unauthorized access to the trustlines and Stellar
account balances, but the authorization logic has to be in the host anyway, so
this CAP doesn't significantly change the risks for the built-in token.
## Implementation
[`auth.rs`](https://github.com/stellar/rs-soroban-env/blob/d92944576e2301c9866215efcdc4bbd24a5f3981/soroban-env-host/src/auth.rs)
-file of Soroban host contains the implementation for the authorization framework.
+file of Soroban host contains the implementation for the authorization
+framework.
[`account_contract.rs`](https://github.com/stellar/rs-soroban-env/blob/d92944576e2301c9866215efcdc4bbd24a5f3981/soroban-env-host/src/native_contract/account_contract.rs)
-file of Soroban host contains the implementation of Stellar account authentication as well as the harness for calling the custom contracts.
+file of Soroban host contains the implementation of Stellar account
+authentication as well as the harness for calling the custom contracts.
## Test Cases
-[test/auth.rs](https://github.com/stellar/rs-soroban-env/blob/d92944576e2301c9866215efcdc4bbd24a5f3981/soroban-env-host/src/test/auth.rs) in Soroban host contains the comprehensive tests for various authorization scenarios.
+[test/auth.rs](https://github.com/stellar/rs-soroban-env/blob/d92944576e2301c9866215efcdc4bbd24a5f3981/soroban-env-host/src/test/auth.rs)
+in Soroban host contains the comprehensive tests for various authorization
+scenarios.
diff --git a/core/cap-0046-12.md b/core/cap-0046-12.md
index d45a3d524..44693f43b 100644
--- a/core/cap-0046-12.md
+++ b/core/cap-0046-12.md
@@ -15,10 +15,10 @@ Protocol version: 20
## Simple Summary
-This proposal defines a state archival interface for Soroban Ledger Entries. This is
-just the initial step in a full state archival solution, but defines a stable
-interface such that all smart contracts deployed with this interface will be
-compatible with future state archival developments.
+This proposal defines a state archival interface for Soroban Ledger Entries.
+This is just the initial step in a full state archival solution, but defines a
+stable interface such that all smart contracts deployed with this interface
+will be compatible with future state archival developments.
## Motivation
@@ -28,85 +28,93 @@ In Stellar classic, the size of the ledger is growing exponentially and will
significantly hurt network performance if not mitigated. Many of these entries
are “spam” entries that are not actually used, such as claimable balance token
airdrops that accounts never claim. To mitigate this growth, this proposal will
-establish the concept of entry TTL, where the users of a given Soroban
-entry must pay for the entry to remain active and immediately useable.
+establish the concept of entry TTL, where the users of a given Soroban entry
+must pay for the entry to remain active and immediately usable.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
-- The Stellar Network should run at scale and at low cost to all participants of the network.
+- The Stellar Network should run at scale and at low cost to all participants
+ of the network.
## Abstract
This CAP introduces the Soroban State Archival interface. A complete state
archival implementation includes:
-1. Host functions and Operations for smart contracts and users to interact with state
-lifetimes.
+1. Host functions and Operations for smart contracts and users to interact with
+ state lifetimes.
2. A robust off-chain system for storing and providing cryptographic proofs of
-archived data.
+ archived data.
-This CAP implements part 1. Initially, most data will not actually be removed from validator
-databases. However, by introducing this interface at Soroban launch, all deployed smart contracts
-will be compatible with state archival when the full solution is introduced in a future
-protocol upgrade.
+This CAP implements part 1. Initially, most data will not actually be removed
+from validator databases. However, by introducing this interface at Soroban
+launch, all deployed smart contracts will be compatible with state archival
+when the full solution is introduced in a future protocol upgrade.
## Specification
### XDR
-See the XDR diffs in the Soroban overview CAP, specifically those referring to `LedgerCloseMetaV1`,
-`ContractDataDurability`, and `TTLEntry`.
+See the XDR diffs in the Soroban overview CAP, specifically those referring to
+`LedgerCloseMetaV1`, `ContractDataDurability`, and `TTLEntry`.
### Semantics
#### State Archival Entry States
-While not codified in XDR or explicitly defined in code, each Soroban entry must be in one of the
-following states. Note that the BucketList still only uses the `INITENTRY`, `LIVEENTRY`, and
-`DEADENTRY` states, which are unrelated to these archival states. These are only defined here to help
-discussion for this document.
+While not codified in XDR or explicitly defined in code, each Soroban entry
+must be in one of the following states. Note that the BucketList still only
+uses the `INITENTRY`, `LIVEENTRY`, and `DEADENTRY` states, which are unrelated
+to these archival states. These are only defined here to help discussion for
+this document.
-In this context, “accessibility” is defined as a Soroban TX being able to read the given entry. See
-[Lifetime Enforcement](#lifetime-enforcement) for more detail.
+In this context, “accessibility” is defined as a Soroban TX being able to read
+the given entry. See [Lifetime Enforcement](#lifetime-enforcement) for more
+detail.
-From the perspective of a TX, each `LedgerEntry` is in one of these three states:
+From the perspective of a TX, each `LedgerEntry` is in one of these three
+states:
- LIVE - Entry exists on the BucketList and validator DB, is accessible.
- ARCHIVED - PERSISTENT entry with TTL of 0, not accessible.
- DEAD - TEMPORARY entry with TTL of 0, not accessible.
-In a full state archival implementation, ARCHIVED and DEAD entries are eligible for removal
-from the BucketList and validator DB. From the perspective of TX invocation, it does not matter
-if an ARCHIVED or DEAD entry exists on the validator or not. While this state should not be
-exposed to contract developers and TX invokers, it is necessary for implementation:
+In a full state archival implementation, ARCHIVED and DEAD entries are eligible
+for removal from the BucketList and validator DB. From the perspective of TX
+invocation, it does not matter if an ARCHIVED or DEAD entry exists on the
+validator or not. While this state should not be exposed to contract developers
+and TX invokers, it is necessary for implementation:
-- EVICTED - Entry does not exist on the BucketList and validator DB. Only ARCHIVED and DEAD entries
-can be EVICTED.
+- EVICTED - Entry does not exist on the BucketList and validator DB. Only
+ ARCHIVED and DEAD entries can be EVICTED.
#### Contract Data Durability
-`ContractDataDurability` defines the durability of storage, either `TEMPORARY` or `PERSISTENT`.
-`ContractDataDurability` is included as a field in LedgerKey such that `TEMPORARY` and `PERSISTENT`
-entries are in separate key spaces.
+`ContractDataDurability` defines the durability of storage, either `TEMPORARY`
+or `PERSISTENT`. `ContractDataDurability` is included as a field in LedgerKey
+such that `TEMPORARY` and `PERSISTENT` entries are in separate key spaces.
-- `TEMPORARY` - Durability of storage that cannot be restored, "dies" and is permanently inaccessible after
-`liveUntilLedgerSeq`.
-- `PERSISTENT` - Durability of storage that is "archived" and can be restored after its `liveUntilLedgerSeq`.
+- `TEMPORARY` - Durability of storage that cannot be restored, "dies" and is
+ permanently inaccessible after `liveUntilLedgerSeq`.
+- `PERSISTENT` - Durability of storage that is "archived" and can be restored
+ after its `liveUntilLedgerSeq`.
#### TTL Entry
-`TTLEntry` is a new type of `LedgerEntry` that contains lifetime information. Every Soroban `LedgerEntry`
-type (`ContractData` and `ContractCode`) must always have an associated `TTLEntry`. Similarly, a
-`TTLEntry` cannot exist without an associated `ContractCode` or `ContractData` entry. It has the
+`TTLEntry` is a new type of `LedgerEntry` that contains lifetime information.
+Every Soroban `LedgerEntry` type (`ContractData` and `ContractCode`) must
+always have an associated `TTLEntry`. Similarly, a `TTLEntry` cannot exist
+without an associated `ContractCode` or `ContractData` entry. It has the
following fields:
-- `keyHash` - Serves as the `LedgerKey`, SHA256 hash of the `LedgerKey` of the associated `ContractCode`
-or `ContractData` entry.
-- `liveUntilLedgerSeq` - The ledger sequence number after which the associated `ContractCode` or
-`ContractData` entry will no longer be accessible. A `PERSISTENT` entry is ARCHIVED immediately following
-its `liveUntilLedgerSeq`, while a `TEMPORARY` entry DIES.
+- `keyHash` - Serves as the `LedgerKey`, SHA256 hash of the `LedgerKey` of the
+ associated `ContractCode` or `ContractData` entry.
+- `liveUntilLedgerSeq` - The ledger sequence number after which the associated
+ `ContractCode` or `ContractData` entry will no longer be accessible. A
+ `PERSISTENT` entry is ARCHIVED immediately following its
+ `liveUntilLedgerSeq`, while a `TEMPORARY` entry DIES.
An entry is considered to be in the LIVE state iff for its `TTLEntry` e:
@@ -116,310 +124,397 @@ isLive(e) == currentLedger <= e.liveUntilLedgerSeq
#### TTL
-An entry’s Time To Live (TTL) is the number of ledgers that the entry will be LIVE based on
-the current ledger sequence number. For example, if `currentLedger == 10` and
-`entry.liveUntilLedgerSeq == 15`, the entry has a TTL of 5 ledgers.
+An entry’s Time To Live (TTL) is the number of ledgers that the entry will be
+LIVE based on the current ledger sequence number. For example, if
+`currentLedger == 10` and `entry.liveUntilLedgerSeq == 15`, the entry has a TTL
+of 5 ledgers.
#### Restore
-To restore an entry is to change the entry’s state from ARCHIVED to LIVE. An entry that is currently LIVE
-cannot be restored. Only entries with `PERSISTENT` durability can be restored.
+To restore an entry is to change the entry’s state from ARCHIVED to LIVE. An
+entry that is currently LIVE cannot be restored. Only entries with `PERSISTENT`
+durability can be restored.
#### Rent Fee
-The cost of extending an entry’s TTL. This fee is variable depending on the current size of the
-BucketList. Fee is determined by entry durability, size, and number of ledgers added to the TTL:
+The cost of extending an entry’s TTL. This fee is variable depending on the
+current size of the BucketList. Fee is determined by entry durability, size,
+and number of ledgers added to the TTL:
```
rent_fee(size, durability, num_ledgers)
= (wfee_rate_average(size) / rent_rate_denominator(durability)) * num_ledgers + write_fee(sizeof(TTLEntry))
```
-See [Fee and resource model in smart contracts](cap-0046-07.md) for more details.
+See [Fee and resource model in smart contracts](cap-0046-07.md) for more
+details.
#### Network Config Settings
-See [Network Configuration Ledger Entries](cap-0046-09.md) for more details on Network Config Settings.
+See [Network Configuration Ledger Entries](cap-0046-09.md) for more details on
+Network Config Settings.
-- `maxEntryTTL` - Maximum TTL that an entry can be extended to at any given point.
+- `maxEntryTTL` - Maximum TTL that an entry can be extended to at any given
+ point.
- `minTemporaryTTL` - Minimum TTL a `TEMPORARY` entry must have on creation.
-- `minPersistentTTL` - Minimum lifetime a `PERSISTENT` entry must have on creation or restoration.
-- `persistentRentRateDenominator` - `rent_rate_denominator` for `PERSISTENT` entries used to calculate `rent_fee`.
- Must be strictly less than `tempRentRateDenominator`.
-- `tempRentRateDenominator` - `rent_rate_denominator` for `TEMPORARY` entries used to calculate `rent_fee`. Must be
- strictly greater than `persistentRentRateDenominator`.
-- `maxEntriesToArchive` - Maximum number of entries that can be evicted in a single ledger.
-- `evictionScanSize` - Maximum number of bytes of the BucketList that the eviction scan will consume per ledger.
+- `minPersistentTTL` - Minimum lifetime a `PERSISTENT` entry must have on
+ creation or restoration.
+- `persistentRentRateDenominator` - `rent_rate_denominator` for `PERSISTENT`
+ entries used to calculate `rent_fee`. Must be strictly less than
+ `tempRentRateDenominator`.
+- `tempRentRateDenominator` - `rent_rate_denominator` for `TEMPORARY` entries
+ used to calculate `rent_fee`. Must be strictly greater than
+ `persistentRentRateDenominator`.
+- `maxEntriesToArchive` - Maximum number of entries that can be evicted in a
+ single ledger.
+- `evictionScanSize` - Maximum number of bytes of the BucketList that the
+ eviction scan will consume per ledger.
#### EvictionIterator
-`EvictionIterator` keeps track of the current eviction scan position within the BucketList. It is updated and written
-to the BucketList after every eviction scan on ledger close.
+`EvictionIterator` keeps track of the current eviction scan position within the
+BucketList. It is updated and written to the BucketList after every eviction
+scan on ledger close.
- `bucketListLevel` - BucketList level of the scan.
-- `isCurrBucket` - Indicates if `curr` Bucket or `snap` bucket is being scanned.
+- `isCurrBucket` - Indicates if `curr` Bucket or `snap` bucket is being
+ scanned.
- `bucketFileOffset` - Indicates file offset to begin eviction scan at.
#### LedgerCloseMetaV1
-- `totalByteSizeOfBucketList` - Average BucketList size used in `wfee_rate` calculation.
-- `evictedTemporaryLedgerKeys` - Vector of `LedgerKey` containing `TEMPORARY` entries that have been evicted in the
-given ledger.
-- `evictedPersistentLedgerEntries` - Vector of `PERSISTENT` `LedgerEntry` that have been evicted in the given ledger.
-Note that this will always be empty until a protocol upgrade with the full state archival implementation.
+- `totalByteSizeOfBucketList` - Average BucketList size used in `wfee_rate`
+ calculation.
+- `evictedTemporaryLedgerKeys` - Vector of `LedgerKey` containing `TEMPORARY`
+ entries that have been evicted in the given ledger.
+- `evictedPersistentLedgerEntries` - Vector of `PERSISTENT` `LedgerEntry` that
+ have been evicted in the given ledger. Note that this will always be empty
+ until a protocol upgrade with the full state archival implementation.
### Design Rationale
#### Durability of Storage
-From a network health perspective, it would be ideal to not allow entry restoration and just
-permanently delete all entries after the `liveUntilLedgerSeq`. Unfortunately this would result in significant issues when
-it comes to user experience. Should a user forget to periodically issue TTL extensions to a high value entry, such as a
-token balance, the valuable entry could be permanently lost. To avoid this, there must be a durability of storage that is
-defined to be recoverable after its TTL goes to 0, `PERSISTENT` storage.
-
-While some data types, such as token balances, need to be recoverable after archival, there are also many data
-types that do not need such guarantees. Many entries can be arbitrarily recreated after their TTL goes to 0 or are only
-relevant for a given period of time. Examples include token allowances, oracle information, and time restricted KYC.
-For these entries that do not need to be recoverable, there is the `TEMPORARY` storage durability.
-
-In order to allow `PERSISTENT` storage restoration, some amount of data must be permanently stored, both on validators
-and in off-chain services. Because of this cost, `TEMPORARY` storage should always be preferred. To incentivize this at
-the protocol level, `TEMPORARY` storage fees are strictly cheaper than `PERSISTENT` storage fees.
+From a network health perspective, it would be ideal to not allow entry
+restoration and just permanently delete all entries after the
+`liveUntilLedgerSeq`. Unfortunately this would result in significant issues
+when it comes to user experience. Should a user forget to periodically issue
+TTL extensions to a high value entry, such as a token balance, the valuable
+entry could be permanently lost. To avoid this, there must be a durability of
+storage that is defined to be recoverable after its TTL goes to 0, `PERSISTENT`
+storage.
+
+While some data types, such as token balances, need to be recoverable after
+archival, there are also many data types that do not need such guarantees. Many
+entries can be arbitrarily recreated after their TTL goes to 0 or are only
+relevant for a given period of time. Examples include token allowances, oracle
+information, and time restricted KYC. For these entries that do not need to be
+recoverable, there is the `TEMPORARY` storage durability.
+
+In order to allow `PERSISTENT` storage restoration, some amount of data must be
+permanently stored, both on validators and in off-chain services. Because of
+this cost, `TEMPORARY` storage should always be preferred. To incentivize this
+at the protocol level, `TEMPORARY` storage fees are strictly cheaper than
+`PERSISTENT` storage fees.
#### Storage Guarantees
-At all times, there must only be a single valid version of an entry per key. This requires certain invariants for each
-durability type.
+At all times, there must only be a single valid version of an entry per key.
+This requires certain invariants for each durability type.
##### `PERSISTENT` Storage Invariants
-To maintain this single version invariant, a `PERSISTENT` entry cannot be recreated if an entry with the same key is
-ARCHIVED. If recreation was allowed, multiple different versions of an entry with the same key could exist in the
+To maintain this single version invariant, a `PERSISTENT` entry cannot be
+recreated if an entry with the same key is ARCHIVED. If recreation was allowed,
+multiple different versions of an entry with the same key could exist in the
ARCHIVED state. This creates many security vulnerabilities and is not allowed.
-If a given `PERSISTENT` entry key is ARCHIVED, the only valid operation for that key is restore. Any call to create,
-load, or erase on the key will fail. In order to delete an ARCHIVED `PERSISTENT` entry, it is necessary to restore the
-entry then delete the restored entry.
+If a given `PERSISTENT` entry key is ARCHIVED, the only valid operation for
+that key is restore. Any call to create, load, or erase on the key will fail.
+In order to delete an ARCHIVED `PERSISTENT` entry, it is necessary to restore
+the entry then delete the restored entry.
##### `TEMPORARY` Storage Invariants
-Because `TEMPORARY` entries cannot be restored, it is much simpler to maintain uniqueness guarantees. Once a
-`TEMPORARY` DIES (i.e. TTL == 0), it is as if the entry never existed. Even if a DEAD `TEMPORARY` entry has not
-yet been EVICTED (i.e., deleted from the BucketList and DB), the key of the entry can be recreated.
+Because `TEMPORARY` entries cannot be restored, it is much simpler to maintain
+uniqueness guarantees. Once a `TEMPORARY` entry DIES (i.e. TTL == 0), it is as
+if the entry never existed. Even if a DEAD `TEMPORARY` entry has not yet been
+EVICTED (i.e., deleted from the BucketList and DB), the key of the entry can be
+recreated.
##### Soroban Entry Invariants
-For each `ContractCode` and `ContractWasm` entry, there must also be an associated `TTLEntry`. Similarly, each
-`TTLEntry` must always be accompanied by a `ContractCode` or `ContractData` entry.
+For each `ContractCode` and `ContractData` entry, there must also be an
+associated `TTLEntry`. Similarly, each `TTLEntry` must always be accompanied by
+a `ContractCode` or `ContractData` entry.
#### Contract Instance and Contract Code Durability
-All `ContractData` entries are subject to state archival, including contract instances. `ContractCode` is also
-subject to state archival. This means that contract instance and code must also periodically receive TTL
-extensions and may also become ARCHIVED and be inaccessible.
+All `ContractData` entries are subject to state archival, including contract
+instances. `ContractCode` is also subject to state archival. This means that
+contract instance and code must also periodically receive TTL extensions and
+may also become ARCHIVED and be inaccessible.
-To prevent difficult edge cases, all `ContractCode` and contract instance entries must be of `PERSISTENT` durability.
+To prevent difficult edge cases, all `ContractCode` and contract instance
+entries must be of `PERSISTENT` durability.
#### TTL Enforcement
-TTLs are enforced at the transaction level. Enforcement differs slightly based on the durability of the entry:
+TTLs are enforced at the transaction level. Enforcement differs slightly based
+on the durability of the entry:
##### `PERSISTENT` Entry TTL Enforcement
-If a transaction has the key of an ARCHIVED `PERSISTENT` entry in the read-only or read-write section of
-the footprint, the transaction immediately fails. This is necessary to maintain the `PERSISTENT` entry invariant that
-the entry cannot be recreated after being ARCHIVED.
+If a transaction has the key of an ARCHIVED `PERSISTENT` entry in the read-only
+or read-write section of the footprint, the transaction immediately fails. This
+is necessary to maintain the `PERSISTENT` entry invariant that the entry cannot
+be recreated after being ARCHIVED.
##### `TEMPORARY` Entry TTL Enforcement
-Unlike `PERSISTENT` entries, if the key of a DEAD `TEMPORARY` entry is in the read-only or read-write
-section of a transaction's footprint, the transaction does not immediately fail. Instead, during transaction
-application, it is as if the entry has never existed. The transaction may fail during application if the contract
-function attempts to read the entry. If the contract function writes to the key, the entry is recreated with the new
-value as if the key never existed.
+Unlike `PERSISTENT` entries, if the key of a DEAD `TEMPORARY` entry is in the
+read-only or read-write section of a transaction's footprint, the transaction
+does not immediately fail. Instead, during transaction application, it is as if
+the entry has never existed. The transaction may fail during application if the
+contract function attempts to read the entry. If the contract function writes
+to the key, the entry is recreated with the new value as if the key never
+existed.
#### TTL Management
-A LIVE entry’s TTL can be extended by a Soroban operation (`ExtendFootprintTTLOp`) or by a host function
-(`extend_*` family of functions). ARCHIVED entries can only be restored by a Soroban operation (`RestoreFootprintOp`).
+A LIVE entry’s TTL can be extended by a Soroban operation
+(`ExtendFootprintTTLOp`) or by a host function (`extend_*` family of
+functions). ARCHIVED entries can only be restored by a Soroban operation
+(`RestoreFootprintOp`).
See [Operations](#operations) and [Host Functions](#host-functions).
##### Authorization
-Any account may issue a TTL extension or restoration for any entry without authorization. TTL extension and
-restoration is defined to always be beneficial to the owner of the entry and can not be used maliciously. This means
-that an entry’s TTL must not be used for security guarantees. If an entry must be invalidated after some number
-of ledgers, a smart contract must define this behavior and can not assume that the entry will only live to a specific
-ledger.
+Any account may issue a TTL extension or restoration for any entry without
+authorization. TTL extension and restoration is defined to always be beneficial
+to the owner of the entry and can not be used maliciously. This means that an
+entry’s TTL must not be used for security guarantees. If an entry must be
+invalidated after some number of ledgers, a smart contract must define this
+behavior and can not assume that the entry will only live to a specific ledger.
##### Fees
-Since the fee to write to the BucketList increases as the BucketList grows in size, `rent_fee` for TTL extensions
-should grow at the same rate. Intuitively, the larger the BucketList size, the faster entries should be
-evicted. By increasing rent fees as write fees increase, the protocol creates a negative back pressure where the more
-expensive it is to add the BucketList, the faster entries will be evicted thus causing the size to decrease. By using
-`wfee_rate_average` as the basis for both write and rent fees, the protocol can ensure that neither the rate of adding
-to the BucketList nor the rate of eviction can outpace one another (within some reasonable correlation).
-
-The `rent_fee` is a fraction of the cost of writing an entry. However, updating an entry's TTL necessitates a
-write, as the `liveUntilLedgerSeq` field must be modified. Due to the structure of the BucketList, it is not possible
-to change a single field in a `LedgerEntry`, as the entire entry must be rewritten. If `liveUntilLedgerSeq` was stored
-directly in the `ContractData` or `ContractCode` entry, TTL extension would require rewriting the entire entry.
-This would make `ContractCode` TTL extensions particularly expensive, as the write fees for rewriting an entire
-WASM blob would be exponentially higher than the actual `rent_fee` cost.
-
-To avoid expensive rewrites, `liveUntilLedgerSeq` are stored in a dedicated `LedgerEntry` (`TTLEntry`)
-separate to the `ContractCode` or `ContractData` entry. These entries are a small, fixed size. While users must still
-pay write fees when updating entry TTLs, `TTLEntry` writes are significantly smaller and cheaper than
-rewriting the associated `ContractCode` or `ContractData` entry.
+Since the fee to write to the BucketList increases as the BucketList grows in
+size, `rent_fee` for TTL extensions should grow at the same rate. Intuitively,
+the larger the BucketList size, the faster entries should be evicted. By
+increasing rent fees as write fees increase, the protocol creates a negative
+back pressure where the more expensive it is to add to the BucketList, the
+faster entries will be evicted thus causing the size to decrease. By using
+`wfee_rate_average` as the basis for both write and rent fees, the protocol can
+ensure that neither the rate of adding to the BucketList nor the rate of
+eviction can outpace one another (within some reasonable correlation).
+
+The `rent_fee` is a fraction of the cost of writing an entry. However, updating
+an entry's TTL necessitates a write, as the `liveUntilLedgerSeq` field must be
+modified. Due to the structure of the BucketList, it is not possible to change
+a single field in a `LedgerEntry`, as the entire entry must be rewritten. If
+`liveUntilLedgerSeq` was stored directly in the `ContractData` or
+`ContractCode` entry, TTL extension would require rewriting the entire entry.
+This would make `ContractCode` TTL extensions particularly expensive, as the
+write fees for rewriting an entire WASM blob would be exponentially higher than
+the actual `rent_fee` cost.
+
+To avoid expensive rewrites, `liveUntilLedgerSeq` are stored in a dedicated
+`LedgerEntry` (`TTLEntry`) separate from the `ContractCode` or `ContractData`
+entry. These entries are a small, fixed size. While users must still pay write
+fees when updating entry TTLs, `TTLEntry` writes are significantly smaller and
+cheaper than rewriting the associated `ContractCode` or `ContractData` entry.
###### TTL Entry Fees
-`TTLEntry` counts toward `readEntry` and `readBytes` fees. This means for each `ContractCode` and `ContractData`
-entry in the footprint (both `readOnly` and `readWrite`), the `TTLEntry` is implicitly included in the `readOnly`
-set. `TTLEntry` does not count towards `writeEntry` and `writeBytes`. Every key in both the `readOnly` and `readWrite`
-is eligible to have its TTL extended, but the extension is conditional on runtime state. If write related fees were
-charged, every key in the footprint would have to pay for a `TTLEntry` write even if no extension occurs. In order
-to account for the write, `rent_fee` includes the `TTLEntry` write fees. This allows for only charging `TTLEntry`
-write fees if an entry's TTL is actually extended. The side effect of this approach is that `TTLEntry` writes do not
-count towards resource write limits. However, `TTLEntry` has a small, fixed size, so this should not be an issue.
+`TTLEntry` counts toward `readEntry` and `readBytes` fees. This means for each
+`ContractCode` and `ContractData` entry in the footprint (both `readOnly` and
+`readWrite`), the `TTLEntry` is implicitly included in the `readOnly` set.
+`TTLEntry` does not count towards `writeEntry` and `writeBytes`. Every key in
+both the `readOnly` and `readWrite` sets is eligible to have its TTL extended,
+but
+the extension is conditional on runtime state. If write related fees were
+charged, every key in the footprint would have to pay for a `TTLEntry` write
+even if no extension occurs. In order to account for the write, `rent_fee`
+includes the `TTLEntry` write fees. This allows for only charging `TTLEntry`
+write fees if an entry's TTL is actually extended. The side effect of this
+approach is that `TTLEntry` writes do not count towards resource write limits.
+However, `TTLEntry` has a small, fixed size, so this should not be an issue.
##### Resize Fees
-When the size of an entry is increased, an additional `rent_fee` must be paid to account for the new size of the entry.
-This fee is as follows:
+When the size of an entry is increased, an additional `rent_fee` must be paid
+to account for the new size of the entry. This fee is as follows:
```
rent_fee_for_size_increase(sizeDelta, entryCurrTTL) = rent_fee(sizeDelta, entryCurrTTL)
```
-This additional fee is charged to prevent gamification, where an entry is originally created as the minimal size, pays
-for the maximum TTL extension, then is resized to a much larger entry. If an entry decreases in size, no additional
-rent_fee is charged and there is no fee refund (see [No Refunds of Rent Fees](#no-refunds-of-rent-fees)).
+This additional fee is charged to prevent gamification, where an entry is
+originally created as the minimal size, pays for the maximum TTL extension,
+then is resized to a much larger entry. If an entry decreases in size, no
+additional rent_fee is charged and there is no fee refund (see
+[No Refunds of Rent Fees](#no-refunds-of-rent-fees)).
##### TTL Limits
-While the cost of a TTL extension is dynamic based on BucketList size, once an entry’s TTL is extended, it
-cannot be reduced. This can create potential issues with rent gamification. Consider a smart contract that reserves
-storage on behalf of other contracts. When the BucketList size is small, the storage provider smart contract can
-allocate large amounts of storage with very large TTLs. Later when storage is more expensive, the storage provider
-contract can auction this storage to other smart contracts at rates below the protocol defined `rent_fee` and make a
-profit. This is detrimental to network health as it incentivizes intermediate storage contracts to allocate
-significant amounts of data by writing dummy values that will never be used. If storage intermediaries become the
-norm, this also harms network performance, where smart contract data access must invoke another smart contract. This
-is a common issue on several smart contract platforms (see Ethereum’s [Gas Token](https://gastoken.io/)).
+While the cost of a TTL extension is dynamic based on BucketList size, once an
+entry’s TTL is extended, it cannot be reduced. This can create potential issues
+with rent gamification. Consider a smart contract that reserves storage on
+behalf of other contracts. When the BucketList size is small, the storage
+provider smart contract can allocate large amounts of storage with very large
+TTLs. Later when storage is more expensive, the storage provider contract can
+auction this storage to other smart contracts at rates below the protocol
+defined `rent_fee` and make a profit. This is detrimental to network health as
+it incentivizes intermediate storage contracts to allocate significant amounts
+of data by writing dummy values that will never be used. If storage
+intermediaries become the norm, this also harms network performance, where
+smart contract data access must invoke another smart contract. This is a common
+issue on several smart contract platforms (see Ethereum’s
+[Gas Token](https://gastoken.io/)).
##### Initial TTL
-Whenever an entry is initially created or restored, it has a protocol defined minimum TTL by default. This
-minimum provides a better UX by giving users a reasonable time window to submit a TTL extension operation
-or invoke a smart contract function that calls the bump host function before an entry expires. This also helps reduce
-BucketList churn, as entries are guaranteed to live for a certain amount of time before they are eligible for eviction.
-
-`TEMPORARY` and `PERSISTENT` storage serve fundamentally different use cases so they have different minimum values.
-`TEMPORARY` entries are largely used for non-user facing contract specific data, such as oracle pricing information,
-nonces, etc. Because `TEMPORARY` entries are primarily managed by the smart contract itself and may only need to live
-for very short periods of time, `TEMPORARY` entries have a smaller minimum TTL. `PERSISTENT` entries are
-more likely to be used for user facing entries, such as token balances, and also put additional strain on the
-network if frequently ARCHIVED and restored. For these reasons, `PERSISTENT` entries have a longer minimum lifetime.
+Whenever an entry is initially created or restored, it has a protocol defined
+minimum TTL by default. This minimum provides a better UX by giving users a
+reasonable time window to submit a TTL extension operation or invoke a smart
+contract function that calls the bump host function before an entry expires.
+This also helps reduce BucketList churn, as entries are guaranteed to live for
+a certain amount of time before they are eligible for eviction.
+
+`TEMPORARY` and `PERSISTENT` storage serve fundamentally different use cases so
+they have different minimum values. `TEMPORARY` entries are largely used for
+non-user facing contract specific data, such as oracle pricing information,
+nonces, etc. Because `TEMPORARY` entries are primarily managed by the smart
+contract itself and may only need to live for very short periods of time,
+`TEMPORARY` entries have a smaller minimum TTL. `PERSISTENT` entries are more
+likely to be used for user facing entries, such as token balances, and also put
+additional strain on the network if frequently ARCHIVED and restored. For these
+reasons, `PERSISTENT` entries have a longer minimum lifetime.
#### Interface Limitations
-Under this proposal is no way for smart contracts to determine the current TTL of an entry. Additionally, all
-TTL extensions are conditional (i.e. there is only “extend TTL to at least 100 ledgers”, there is no way to
-“extend TTL by 100 ledgers no matter the current value). This is intentional. The current TTL extension
-interface is friendly to parallelism. Because TTL extensions are conditional, two transactions can issue a
-TTL extension to the same entry without creating a data dependency.
+Under this proposal there is no way for smart contracts to determine the
+current TTL of an entry. Additionally, all TTL extensions are conditional
+(i.e. there is only “extend TTL to at least 100 ledgers”, there is no way to
+“extend TTL by 100 ledgers no matter the current value”). This is intentional.
+The current TTL extension interface is friendly to parallelism. Because TTL
+extensions are conditional, two transactions can issue a TTL extension to the
+same entry without creating a data dependency.
-Suppose TX A invokes the host function `extend_contract_data(“key”, TEMPORARY, 50)` and TX B invokes
-`extend_contract_data(“key”, TEMPORARY, 150)`. From a state and fee fairness perspective, the execution order
-of these TXs is irrelevant. Suppose `TEMPORARY(“key”)` has a current TTL of 10 ledgers.
+Suppose TX A invokes the host function
+`extend_contract_data(“key”, TEMPORARY, 50)` and TX B invokes
+`extend_contract_data(“key”, TEMPORARY, 150)`. From a state and fee fairness
+perspective, the execution order of these TXs is irrelevant. Suppose
+`TEMPORARY(“key”)` has a current TTL of 10 ledgers.
If execution order is A, B:
-- A sees that the entry’s TTL is 10 ledgers. A extends the entry’s TTL by 40 ledgers such that the resulting
-TTL is 50. A is charged for 40 ledgers of `rent_fees`.
+- A sees that the entry’s TTL is 10 ledgers. A extends the entry’s TTL by 40
+ ledgers such that the resulting TTL is 50. A is charged for 40 ledgers of
+ `rent_fees`.
-- B sees that the entry’s TTL is 50 ledgers. B extends the entry’s TTL by 100 ledgers such that the
-resulting TTL is 150. B is charged for 100 ledgers of `rent_fees`.
+- B sees that the entry’s TTL is 50 ledgers. B extends the entry’s TTL by 100
+ ledgers such that the resulting TTL is 150. B is charged for 100 ledgers of
+ `rent_fees`.
- The resulting state is that `TEMPORARY(“key”)` has a TTL of 150 ledgers.
If execution order is B, A:
-- B sees that the entry’s TTL is 10 ledgers. B extends the entry’s TTL by 140 ledgers such that the
-resulting TTL is 150. B is charged for 140 ledgers of `rent_fees`.
-- A sees that the entry’s TTL is 150 ledgers. A does nothing and is charged no fees.
+- B sees that the entry’s TTL is 10 ledgers. B extends the entry’s TTL by 140
+ ledgers such that the resulting TTL is 150. B is charged for 140 ledgers of
+ `rent_fees`.
+- A sees that the entry’s TTL is 150 ledgers. A does nothing and is charged no
+ fees.
- The resulting state is that `TEMPORARY(“key”)` has a TTL of 150 ledgers.
-No matter the execution order, the resulting state is the same. While the fees charged to each individual transaction
-are dependent on execution order, no single TX is charged more fees than it would have been charged should the other
-transaction not have occurred. A given TX may be charged less fees if another TX extends the TTL of the same entry,
-but is never more fees. This means that the TTL extension interface is thread safe from both a state and fairness
+No matter the execution order, the resulting state is the same. While the fees
+charged to each individual transaction are dependent on execution order, no
+single TX is charged more fees than it would have been charged should the other
+transaction not have occurred. A given TX may be charged less fees if another
+TX extends the TTL of the same entry, but is never more fees. This means that
+the TTL extension interface is thread safe from both a state and fairness
perspective.
-If the entry’s TTL was accessible to smart contract functions, contracts could define arbitrary execution paths
-based on the TTL value and create data dependencies. If unconditional rent bumps were allowed (i.e. extend the
-TTL by 100 ledgers no matter the current TTL), the end state of the entry’s TTL would be dependent on
-execution order.
+If the entry’s TTL was accessible to smart contract functions, contracts could
+define arbitrary execution paths based on the TTL value and create data
+dependencies. If unconditional rent bumps were allowed (i.e. extend the TTL by
+100 ledgers no matter the current TTL), the end state of the entry’s TTL would
+be dependent on execution order.
-Should more TTL management functionality be required, these host functions can be added in the future at the cost
-of parallelism. However, the current interface preserves parallelism and allows contracts to define a guaranteed lower
-bound TTL for all entries. From a contract execution standpoint, a TTL lower bound guarantee should be
-sufficient.
+Should more TTL management functionality be required, these host functions can
+be added in the future at the cost of parallelism. However, the current
+interface preserves parallelism and allows contracts to define a guaranteed
+lower bound TTL for all entries. From a contract execution standpoint, a TTL
+lower bound guarantee should be sufficient.
#### No Refunds of Rent Fees
-When reducing the size of an entry or deleting an entry that still has a non-zero TTL, there is no `rent_fee`
-refund. Due to the variable nature of `rent_fee`, a `rent_fee` refund could be gamed for a profit where a lifetime
-increase is paid for when `rent_fee` is low, but a refund is issued when `rent_fee` is high
-(see [Ethereum’s Gas Token](https://gastoken.io/)).
+When reducing the size of an entry or deleting an entry that still has a
+non-zero TTL, there is no `rent_fee` refund. Due to the variable nature of
+`rent_fee`, a `rent_fee` refund could be gamed for a profit where a lifetime
+increase is paid for when `rent_fee` is low, but a refund is issued when
+`rent_fee` is high (see [Ethereum’s Gas Token](https://gastoken.io/)).
-There is also a fairness issue with offering refunds. While many accounts may pay for TTL extensions,
-especially for shared entries such as contract instances and contract code, only a single account receives the refund.
-This opens the door for subtle attacks, where a contract developer could periodically delete a shared entry whose
-TTL is extended on each contract invocation, effectively stealing fees from invokers of the contract.
+There is also a fairness issue with offering refunds. While many accounts may
+pay for TTL extensions, especially for shared entries such as contract
+instances and contract code, only a single account receives the refund. This
+opens the door for subtle attacks, where a contract developer could
+periodically delete a shared entry whose TTL is extended on each contract
+invocation, effectively stealing fees from invokers of the contract.
#### EVICTED State
-From the perspective of transaction, it does not matter if an ARCHIVED or DEAD entry is EVICTED. Once an
-entry is no longer LIVE (the current ledger is greater than an entry’s `liveUntilLedgerSeq`), the entry is
-inaccessible. This applies whether an ARCHIVED or DEAD entry still remains in the BucketList and validator DB
-or has been EVICTED.
+From the perspective of a transaction, it does not matter if an ARCHIVED or
+DEAD
+entry is EVICTED. Once an entry is no longer LIVE (the current ledger is
+greater than an entry’s `liveUntilLedgerSeq`), the entry is inaccessible. This
+applies whether an ARCHIVED or DEAD entry still remains in the BucketList and
+validator DB or has been EVICTED.
-This distinction is required for validators for operational reasons. Because TTL extensions may define arbitrary
-extension amounts, an arbitrary number of entries may have TTL of 0 on a given ledger. If
-entries were immediately EVICTED following their `liveUntilLedgerSeq`, the validator would have to emit an arbitrary
-amount of EVICTION meta and update an arbitrary number of database entries. This is especially detrimental because
-BucketList deletion requires a disk write. For this reason, ARCHIVED/DEAD entries are not immediately EVICTED.
+This distinction is required for validators for operational reasons. Because
+TTL extensions may define arbitrary extension amounts, an arbitrary number of
+entries may have a TTL of 0 on a given ledger. If entries were immediately
+EVICTED following their `liveUntilLedgerSeq`, the validator would have to emit
+an arbitrary amount of EVICTION meta and update an arbitrary number of database
+entries. This is especially detrimental because BucketList deletion requires a
+disk write. For this reason, ARCHIVED/DEAD entries are not immediately EVICTED.
#### EVICTION Process
-In this proposal, only `TEMPORARY` entries are eligible for eviction. `PERSISTENT` entries will remain perpetually in the
-`ARCHIVED` state in the BucketList and validator DB until a restore operation makes them LIVE.
+In this proposal, only `TEMPORARY` entries are eligible for eviction.
+`PERSISTENT` entries will remain perpetually in the `ARCHIVED` state in the
+BucketList and validator DB until a restore operation makes them LIVE.
-To EVICT an entry is to delete it from the BucketList and validator DB. Only DEAD `TEMPORARY` entries can be evicted.
-Ideally, `TEMPORARY` entries would be EVICTED as soon as their TTL goes to 0, reducing the size of the BucketList as
-fast as possible. This is not possible in practice because an arbitrary number of entries can DIE on a given ledger.
-Instead, we periodically scan a portion of the BucketList and EVICT any DEAD entries in the given scan region as
+To EVICT an entry is to delete it from the BucketList and validator DB. Only
+DEAD `TEMPORARY` entries can be evicted. Ideally, `TEMPORARY` entries would be
+EVICTED as soon as their TTL goes to 0, reducing the size of the BucketList as
+fast as possible. This is not possible in practice because an arbitrary number
+of entries can DIE on a given ledger. Instead, we periodically scan a portion
+of the BucketList and EVICT any DEAD entries in the given scan region as
follows.
-On each ledger close, a small portion of the BucketList is scanned to check if any entries are eligible for EVICTION.
-The eviction scan is an expensive process. For each `TEMPORARY` entry in the region, it is necessary to load its
-`TTLEntry` to check if the entry's TTL is 0. Due to this disk read amplification, the amount of bytes scanned per
-ledger close is limited (`maxEvictionScanSize`). Additionally, to not overwhelm downstream systems, there is a maximum
-number of entries that can be evicted in a given ledger (`maxEntriesToArchive`).
-
-To keep track of the current scan position and so validators joining the network can maintain a uniform scan region, an
-iterator of the current scan position is stored in the BucketList as a `NetworkConfig` entry. If a bucket being scanned
-changes (either via an incoming merge or snap event), the iterator is reset to the beginning of the new bucket in the
-same position. Due to this, iteration scans begin at level 6 of the BucketList so that there is ample time to finish
-scanning a given bucket before it is modified. Should the BucketList size increase over time, it may be necessary to
-change the scan size and maximum allowed evicted entries via network config setting vote.
-
-On each ledger close, the validator scans a small section of the BucketList as follows:
+On each ledger close, a small portion of the BucketList is scanned to check if
+any entries are eligible for EVICTION. The eviction scan is an expensive
+process. For each `TEMPORARY` entry in the region, it is necessary to load its
+`TTLEntry` to check if the entry's TTL is 0. Due to this disk read
+amplification, the amount of bytes scanned per ledger close is limited
+(`maxEvictionScanSize`). Additionally, to not overwhelm downstream systems,
+there is a maximum number of entries that can be evicted in a given ledger
+(`maxEntriesToArchive`).
+
+To keep track of the current scan position and so validators joining the
+network can maintain a uniform scan region, an iterator of the current scan
+position is stored in the BucketList as a `NetworkConfig` entry. If a bucket
+being scanned changes (either via an incoming merge or snap event), the
+iterator is reset to the beginning of the new bucket in the same position. Due
+to this, iteration scans begin at level 6 of the BucketList so that there is
+ample time to finish scanning a given bucket before it is modified. Should the
+BucketList size increase over time, it may be necessary to change the scan size
+and maximum allowed evicted entries via network config setting vote.
+
+On each ledger close, the validator scans a small section of the BucketList as
+follows:
```
bytes_read = 0
@@ -448,102 +543,129 @@ update_and_write_iterator()
##### TTL Extension and Restore
-All TTL extension and restore operations are fundamentally just modifications of the `liveUntilLedgerSeq` of a given
-`TTLEntry`. This means no additional work or special casing is required to emit meta for these operations. The
-transaction meta for these operations will contain `ledger_entry_updated` meta events for the updated `TTLEntry`. This
-should require no ingestion logic changes to downstream systems (other than support for the new `LedgerEntry` type,
-`TTLEntry`).
+All TTL extension and restore operations are fundamentally just modifications
+of the `liveUntilLedgerSeq` of a given `TTLEntry`. This means no additional
+work or special casing is required to emit meta for these operations. The
+transaction meta for these operations will contain `ledger_entry_updated` meta
+events for the updated `TTLEntry`. This should require no ingestion logic
+changes to downstream systems (other than support for the new `LedgerEntry`
+type, `TTLEntry`).
##### EVICTION Meta
-Whenever an entry is EVICTED, meta should be emitted to help downstream systems garbage collect.
-This is not strictly necessary since downstream systems can deduce if an entry is eligible for EVICTION and should be
-deleted based on the current ledger number and the entry’s `liveUntilLedgerSeq`. However, this meta is useful for
-downstream system garbage collection and maintains the current status quo where all BucketList changes are
-emitted as meta.
-
-Because an arbitrary number of entries can become non-live on a given ledger, it is not possible to emit meta on the
-ledger in which an entry's TTL goes to 0. Due to this, there is a distinction between ARCHIVED/DEAD and EVICTION states.
-Meta is not emitted when an entry becomes non-live, but only when it is EVICTED.
-
-Due to this, downstream systems that simulate Soroban transactions must
-track an entry’s `liveUntilLedgerSeq` and enforce TX access control themselves. EVICTION meta does not tell
-downstream systems that an entry has a TTL of 0, but tells downstream systems that the validator has deleted the
-given entry from its database and that it is safe for the downstream system to also delete the entry.
-
-EVICTION is not caused by a transaction and is not included in transaction meta. Instead, `evictedTemporaryLedgerKeys`
-in `LedgerCloseMetaV1` contains the keys of evicted `TEMPORARY` `ContractData` entries and their
-associated `TTLEntry`. Downstream systems should delete these keys from their database when they receive this meta.
-
-This proposal does not evict `PERSISTENT` entries. `evictedPersistentLedgerEntries` will always be empty until a
-future protocol upgrade.
+Whenever an entry is EVICTED, meta should be emitted to help downstream systems
+garbage collect. This is not strictly necessary since downstream systems can
+deduce if an entry is eligible for EVICTION and should be deleted based on the
+current ledger number and the entry’s `liveUntilLedgerSeq`. However, this meta
+is useful for downstream system garbage collection and maintains the current
+status quo where all BucketList changes are emitted as meta.
+
+Because an arbitrary number of entries can become non-live on a given ledger,
+it is not possible to emit meta on the ledger in which an entry's TTL goes
+to 0. Due to this, there is a distinction between ARCHIVED/DEAD and EVICTION
+states. Meta is not emitted when an entry becomes non-live, but only when it is
+EVICTED.
+
+Due to this, downstream systems that simulate Soroban transactions must track
+an entry’s `liveUntilLedgerSeq` and enforce TX access control themselves.
+EVICTION meta does not tell downstream systems that an entry has a TTL of 0,
+but tells downstream systems that the validator has deleted the given entry
+from its database and that it is safe for the downstream system to also delete
+the entry.
+
+EVICTION is not caused by a transaction and is not included in transaction
+meta. Instead, `evictedTemporaryLedgerKeys` in `LedgerCloseMetaV1` contains the
+keys of evicted `TEMPORARY` `ContractData` entries and their associated
+`TTLEntry`. Downstream systems should delete these keys from their database
+when they receive this meta.
+
+This proposal does not evict `PERSISTENT` entries.
+`evictedPersistentLedgerEntries` will always be empty until a future protocol
+upgrade.
### Operations
#### `ExtendFootprintTTLOp`
-This is a Soroban operation that will bump the TTL of all entries specified in the read-only set of the footprint.
-If an entry is not LIVE, then that entry will not be bumped. Note that the transaction
-does not necessarily fail if a non LIVE entry is in the read-only set. The bump will ensure that each LIVE entry in the
-read-only set has a TTL of at least `extendTo` ledgers. The operation has the following requirements:
+This is a Soroban operation that will bump the TTL of all entries specified in
+the read-only set of the footprint. If an entry is not LIVE, then that entry
+will not be bumped. Note that the transaction does not necessarily fail if a
+non LIVE entry is in the read-only set. The bump will ensure that each LIVE
+entry in the read-only set has a TTL of at least `extendTo` ledgers. The
+operation has the following requirements:
-- `ExtendFootprintTTLOp` is a Soroban operation, and therefore must be the only operation in a transaction.
+- `ExtendFootprintTTLOp` is a Soroban operation, and therefore must be the only
+ operation in a transaction.
- The read-write set of the footprint must be empty.
-- Every key in the read-only set must be of type `ContractData` or `ContractCode`.
-- The transaction needs to populate the `SorobanTransactionData` transaction extension where `readBytes` includes the
-entry size of every entry in the `readOnly` set plus the size of the `TTLEntry` for each entry.
+- Every key in the read-only set must be of type `ContractData` or
+ `ContractCode`.
+- The transaction needs to populate the `SorobanTransactionData` transaction
+ extension where `readBytes` includes the entry size of every entry in the
+ `readOnly` set plus the size of the `TTLEntry` for each entry.
- `extendedMetaDataSizeBytes` is at least double of `readBytes`.
-- If `extendTo` > `maxEntryTTL - 1`, TX fails. Since the current ledger is included in the extension charges, the max
-extension value is `maxEntryTTL - 1`.
+- If `extendTo` > `maxEntryTTL - 1`, TX fails. Since the current ledger is
+ included in the extension charges, the max extension value is
+ `maxEntryTTL - 1`.
-This op charges `readEntry` and `readByte` fees for each entry in the footprint plus each entry's `TTLEntry`. There
-are no write fees. For any entry bumped, `rent_fee` is charged from the `refundableFee` (see [Fees](#fees)).
+This op charges `readEntry` and `readByte` fees for each entry in the footprint
+plus each entry's `TTLEntry`. There are no write fees. For any entry bumped,
+`rent_fee` is charged from the `refundableFee` (see [Fees](#fees)).
#### `RestoreFootprintOp`
-This is a Soroban operation that will restore PERSISTENT entries specified in the read-write set
-of the footprint that are ARCHIVED and make them LIVE. Entries that have been
-restored will have a lifetime of `minPersistentTTL` as if the entry was created for
-the first time. The operation has the following requirements:
+This is a Soroban operation that will restore PERSISTENT entries specified in
+the read-write set of the footprint that are ARCHIVED and make them LIVE.
+Entries that have been restored will have a lifetime of `minPersistentTTL` as
+if the entry was created for the first time. The operation has the following
+requirements:
-- `ExtendFootprintTTLOp` is a Soroban operation, and therefore must be the only operation in a transaction.
+- `RestoreFootprintOp` is a Soroban operation, and therefore must be the only
+  operation in a transaction.
- The read-only set of the footprint must be empty.
-- Every key in the read-write set must be of type `ContractCode` or PERSISTENT `ContractData`.
-- The transaction needs to populate the `SorobanTransactionData` transaction extension where `writeBytes` includes the
-entry size of every entry in the `readWrite`.
-- `readBytes` must include the entry size of every entry in the `readWrite` set plus the size of the `TTLEntry` for each entry.
+- Every key in the read-write set must be of type `ContractCode` or PERSISTENT
+ `ContractData`.
+- The transaction needs to populate the `SorobanTransactionData` transaction
+ extension where `writeBytes` includes the entry size of every entry in the
+  `readWrite` set.
+- `readBytes` must include the entry size of every entry in the `readWrite` set
+ plus the size of the `TTLEntry` for each entry.
- `extendedMetaDataSizeBytes` is at least double of `writeBytes`.
-This op charges `readEntry` and `readByte` fees for each entry in the footprint plus each entry's `TTLEntry`.
-`writeEntry` and `writeBytes` fees are charged for each entry in the `readWrite` set. For any entry restored,
-`rent_fee` is charged from the `refundableFee` (see [Fees](#fees)).
+This op charges `readEntry` and `readByte` fees for each entry in the footprint
+plus each entry's `TTLEntry`. `writeEntry` and `writeBytes` fees are charged
+for each entry in the `readWrite` set. For any entry restored, `rent_fee` is
+charged from the `refundableFee` (see [Fees](#fees)).
### Host Functions
`extend_contract_data(key:Val, type:StorageType, threshold: U32Val, extend_to: U32Val)`
-For the given `ContractData` entry specified by `key` and `val`, if the current TTL is less than
-`threshold`, set the TTL to `extend_to` ledgers, i.e., extend `live_until_ledger_seq` such that
-TTL == `extend_to`. This host function does not charge any read or
-write fees, but charges `rent_fee` if the entry's TTL is extended (see [Fees](#fees)). If
-`extendTo` > `maxEntryTTL - 1`, TX fails. Since the current ledger is included in the extension charges, the max
+For the given `ContractData` entry specified by `key` and `type`, if the
+current
+TTL is less than `threshold`, set the TTL to `extend_to` ledgers, i.e., extend
+`live_until_ledger_seq` such that TTL == `extend_to`. This host function does
+not charge any read or write fees, but charges `rent_fee` if the entry's TTL is
+extended (see [Fees](#fees)). If `extendTo` > `maxEntryTTL - 1`, TX fails.
+Since the current ledger is included in the extension charges, the max
extension value is `maxEntryTTL - 1`.
`extend_current_contract_instance_and_code(threshold: U32Val, extend_to: U32Val)`
-For the current contract instance and code (if applicable), if the current TTL is less than
-`threshold`, set the TTL to `extend_to` ledgers, i.e., extend `live_until_ledger_seq` such that
-TTL == `extend_to`. If a given entry’s TTL is >= `threshold`, that entry’s TTL is not changed.
-Note that if the contract instance TTL < `threshold` but the contract code TTL >=
-`threshold`, only the contract instance TTL will be extended. This host function does not
-charge any read or write fees, but charges `rent_fee` if the entry's TTL is extended (see [Fees](#fees)).
-If `extendTo` > `maxEntryTTL - 1`, TX fails. Since the current ledger is included in the extension charges, the max
-extension value is `maxEntryTTL - 1`.
+For the current contract instance and code (if applicable), if the current TTL
+is less than `threshold`, set the TTL to `extend_to` ledgers, i.e., extend
+`live_until_ledger_seq` such that TTL == `extend_to`. If a given entry’s TTL
+is >= `threshold`, that entry’s TTL is not changed. Note that if the contract
+instance TTL < `threshold` but the contract code TTL >= `threshold`, only the
+contract instance TTL will be extended. This host function does not charge any
+read or write fees, but charges `rent_fee` if the entry's TTL is extended (see
+[Fees](#fees)). If `extendTo` > `maxEntryTTL - 1`, TX fails. Since the current
+ledger is included in the extension charges, the max extension value is
+`maxEntryTTL - 1`.
`extend_contract_instance_and_code(contract:Address, threshold: U32Val, extend_to: U32Val)`
-Same as `extend_current_contract_instance_and_code`, but will extend the contract instance and
-`ContractCode` entry for `contract` instead of the current running contract.
+Same as `extend_current_contract_instance_and_code`, but will extend the
+contract instance and `ContractCode` entry for `contract` instead of the
+current running contract.
`get_max_live_until_ledger()`
@@ -555,46 +677,59 @@ get_max_live_until_ledger() == currentLedgerSeq - networkConfig.maxEntryTTL
### Resource Utilization
-By introducing a self-deleting entry type (`TEMPORARY` `ContractData`), this CAP should encourage healthy data usage and
-reduce the growth of validator databases. The full implementation of state archival will further reduce disk usage.
+By introducing a self-deleting entry type (`TEMPORARY` `ContractData`), this
+CAP should encourage healthy data usage and reduce the growth of validator
+databases. The full implementation of state archival will further reduce disk
+usage.
-The eviction scan requires blocking disk IO during ledger close. However, this is done on small batches and should close
-time increase too much, the batch size can be adjusted via Network Setting vote.
+The eviction scan requires blocking disk IO during ledger close. However, this
+is done on small batches and should close time increase too much, the batch
+size can be adjusted via Network Setting vote.
### Security Concerns
Given that
-1. Transaction footprints that contain an ARCHIVED `PERSISTENT` key immediately fail
+1. Transaction footprints that contain an ARCHIVED `PERSISTENT` key immediately
+ fail
2. There is at most one valid version of a given entry at all times
-there are no security issues at the protocol level. The greatest protocol security
-concern is that an entry that is in the ARCHIVED state may be recreated with a different
-value instead of being restored to its previous value. Because the state of a given key
-is checked before transaction application, recreation is not possible.
-
-While not a protocol security issue, `TEMPORARY` `ContractData` entries expose potential
-footguns for contract developers. There are three classes of vulnerability:
-
-1. A `TEMPORARY` entry's `liveUntilLedgerSeq` is relied upon for security purposes. For example,
-suppose a smart contract stores a signature in a `TEMPORARY` entry and bumps the TTL to 7 days
-with the expectation that the entry will become non-live and invalidate the signature in 7 days. Because
-TTL extensions do not require authorization, a byzantine actor could extend the TTL of
-the entry again, causing the signature to be valid for longer than intended. `liveUntilLedgerSeq`
-must never be relied on for security purposes.
-
-2. A `TEMPORARY` entry expires and is maliciously recreated. Because `TEMPORARY` entries can not
-be restored, the key can be recreated immediately after it's TTL goes to 0. Suppose a contract uses a
-`TEMPORARY` entry to store a nonce counter. On creation, the nonce is zero initialized. Throughout
-the lifetime of the entry, it is incremented several times. Eventually, the nonce entry's TTL goes to 0.
-At this point, a buggy contract may reinitialize the nonce value to zero, as the entry does not exist
-and is being recreated. This allows for potential nonce replay attacks. To mitigate this, smart
-contracts should not rely on the existence of a `TEMPORARY` entry for anything security related.
-
-These potential footguns are exclusive to `TEMPORARY` entries. Because `PERSISTENT` entries can be
-restored and cannot be recreated after being ARCHIVED, they are not subject to these concerns.
-
-These issues cannot be solved at the protocol level. While `TEMPORARY` entries do open the door
-to some security risks, they are also beneficial to contract developers and to network health
-as a whole. Documentation should warn developers about these potential issues and offer best
-practices regarding data durability and state archival.
+there are no security issues at the protocol level. The greatest protocol
+security concern is that an entry that is in the ARCHIVED state may be
+recreated with a different value instead of being restored to its previous
+value. Because the state of a given key is checked before transaction
+application, recreation is not possible.
+
+While not a protocol security issue, `TEMPORARY` `ContractData` entries expose
+potential footguns for contract developers. There are three classes of
+vulnerability:
+
+1. A `TEMPORARY` entry's `liveUntilLedgerSeq` is relied upon for security
+ purposes. For example, suppose a smart contract stores a signature in a
+ `TEMPORARY` entry and bumps the TTL to 7 days with the expectation that the
+ entry will become non-live and invalidate the signature in 7 days. Because
+ TTL extensions do not require authorization, a byzantine actor could extend
+ the TTL of the entry again, causing the signature to be valid for longer
+ than intended. `liveUntilLedgerSeq` must never be relied on for security
+ purposes.
+
+2. A `TEMPORARY` entry expires and is maliciously recreated. Because
+ `TEMPORARY` entries can not be restored, the key can be recreated
+ immediately after it's TTL goes to 0. Suppose a contract uses a `TEMPORARY`
+ entry to store a nonce counter. On creation, the nonce is zero initialized.
+ Throughout the lifetime of the entry, it is incremented several times.
+ Eventually, the nonce entry's TTL goes to 0. At this point, a buggy contract
+ may reinitialize the nonce value to zero, as the entry does not exist and is
+ being recreated. This allows for potential nonce replay attacks. To mitigate
+ this, smart contracts should not rely on the existence of a `TEMPORARY`
+ entry for anything security related.
+
+These potential footguns are exclusive to `TEMPORARY` entries. Because
+`PERSISTENT` entries can be restored and cannot be recreated after being
+ARCHIVED, they are not subject to these concerns.
+
+These issues cannot be solved at the protocol level. While `TEMPORARY` entries
+do open the door to some security risks, they are also beneficial to contract
+developers and to network health as a whole. Documentation should warn
+developers about these potential issues and offer best practices regarding data
+durability and state archival.
diff --git a/core/cap-0046.md b/core/cap-0046.md
index dea76e389..97d7fa92a 100644
--- a/core/cap-0046.md
+++ b/core/cap-0046.md
@@ -9,23 +9,24 @@ Working Group:
Consulted: Leigh McCulloch <@leighmcculloch>, Tomer Weller <@tomerweller>, Jon Jove <@jonjove>, Nicolas Barry <@MonsieurNicolas>, Thibault de Lacheze-Murel <@C0x41lch0x41>
Status: Final
Created: 2022-10-27
-Discussion:
+Discussion:
Protocol version: 20
```
## Simple Summary
This CAP is an overview of changes to stellar-core and the Stellar Protocol
-needed to enable the [Soroban smart contract system](https://soroban.stellar.org).
+needed to enable the
+[Soroban smart contract system](https://soroban.stellar.org).
Various aspects of the system design are described in Soroban "sub-CAPs". This
"overview CAP" exists to
- - Discuss motivation and design choices across the overall project, to avoid
- repeating it in each sub-CAP.
- - Give a cumulative XDR diff covering changes made in all related CAPs, to
- ease the burden of keeping the XDR diff current during development.
- - Link to and describe the relationships between sub-CAPs and the XDR changes.
+- Discuss motivation and design choices across the overall project, to avoid
+ repeating it in each sub-CAP.
+- Give a cumulative XDR diff covering changes made in all related CAPs, to ease
+ the burden of keeping the XDR diff current during development.
+- Link to and describe the relationships between sub-CAPs and the XDR changes.
## Working Group
@@ -55,65 +56,66 @@ sub-CAPs that it depends on or interacts with.
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects
- to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
Soroban adds a platform for executing smart contracts to the Stellar network.
-It is small, simple, efficient, and based on standardized technology, leveraging
-standard tools and techniques whenever possible, and focusing on providing a
-high-quality and low-effort developer experience for writing smart contracts.
+It is small, simple, efficient, and based on standardized technology,
+leveraging standard tools and techniques whenever possible, and focusing on
+providing a high-quality and low-effort developer experience for writing smart
+contracts.
## Specification
-All specifications _besides_ the cumulative XDR diffs below are provided in
-the following sub-CAPs:
-
- - [CAP-0046-01 (ex-0046) - Soroban Runtime Environment](./cap-0046-01.md)
- covers the code and data _environment_ that smart contracts run inside,
- rather than their relationship to the rest of the network. This mostly
- relates to the new XDR files below, rather than the diffs.
- - [CAP-0046-02 (ex-0047) - Smart Contract Lifecycle](./cap-0046-02.md) covers
- the mechanism for creating smart contracts.
- - CAPs 0048, 0049 and 0050 are abandoned explorations of interoperability
- between smart contracts and existing assets on the Stellar network.
- - [CAP-0046-03 (ex-0051) - Smart Contract Host Functions](./cap-0046-03.md)
- covers the new functions provided by the smart contract host to contracts.
- The semantics of some of these are specified in other topic-specific CAPs,
- but the central list of them will continue to live in CAP-0051.
- - [CAP-0046-04] - deprecated.
- - [CAP-0046-05 (ex-0053) - Smart Contract Data](./cap-0046-05.md) covers new
- ledger entries to store contract data.
- - [CAP-0046-06 (ex-0054) - Smart Contract Standardized Asset](./cap-0046-06.md)
- covers the built-in token contract, that can also "wrap" existing Stellar
- assets.
- - [CAP-0046-07 (ex-0055) - Fee and Resource Model in Smart Contracts](./cap-0046-07.md)
- covers changes to the network's fee-charging system to account for smart
- contracts.
- - [CAP-0046-08 (ex-0056) - Smart Contract Logging](./cap-0046-08.md) covers
- changes to the transaction result, metadata, and ledger close metadata XDR
- to capture a new set of "events" emitted during smart contract execution.
- - [CAP-0046-09 - Network Configuration Ledger Entries](./cap-0046-09.md)
- introduces configuration ledger entries and specifies the protocol upgrade
- process for such entries.
- - [CAP-0046-10 - Smart Contract Budget Metering](./cap-0046-10.md) introduces
+All specifications _besides_ the cumulative XDR diffs below are provided in the
+following sub-CAPs:
+
+- [CAP-0046-01 (ex-0046) - Soroban Runtime Environment](./cap-0046-01.md)
+ covers the code and data _environment_ that smart contracts run inside,
+ rather than their relationship to the rest of the network. This mostly
+ relates to the new XDR files below, rather than the diffs.
+- [CAP-0046-02 (ex-0047) - Smart Contract Lifecycle](./cap-0046-02.md) covers
+ the mechanism for creating smart contracts.
+- CAPs 0048, 0049 and 0050 are abandoned explorations of interoperability
+ between smart contracts and existing assets on the Stellar network.
+- [CAP-0046-03 (ex-0051) - Smart Contract Host Functions](./cap-0046-03.md)
+ covers the new functions provided by the smart contract host to contracts.
+ The semantics of some of these are specified in other topic-specific CAPs,
+ but the central list of them will continue to live in CAP-0051.
+- [CAP-0046-04] - deprecated.
+- [CAP-0046-05 (ex-0053) - Smart Contract Data](./cap-0046-05.md) covers new
+ ledger entries to store contract data.
+- [CAP-0046-06 (ex-0054) - Smart Contract Standardized Asset](./cap-0046-06.md)
+ covers the built-in token contract, that can also "wrap" existing Stellar
+ assets.
+- [CAP-0046-07 (ex-0055) - Fee and Resource Model in Smart Contracts](./cap-0046-07.md)
+ covers changes to the network's fee-charging system to account for smart
+ contracts.
+- [CAP-0046-08 (ex-0056) - Smart Contract Logging](./cap-0046-08.md) covers
+ changes to the transaction result, metadata, and ledger close metadata XDR to
+ capture a new set of "events" emitted during smart contract execution.
+- [CAP-0046-09 - Network Configuration Ledger Entries](./cap-0046-09.md)
+ introduces configuration ledger entries and specifies the protocol upgrade
+ process for such entries.
+- [CAP-0046-10 - Smart Contract Budget Metering](./cap-0046-10.md) introduces
the budget and metering framework.
- - [CAP-0046-11 - Soroban Authorization Framework](./cap-0046-11.md)
- introduces the authorization framework.
-- [CAP-0046-12 - Soroban State Archival Interface](./cap-0046-12.md)
- introduces the State Archival interface.
+- [CAP-0046-11 - Soroban Authorization Framework](./cap-0046-11.md) introduces
+ the authorization framework.
+- [CAP-0046-12 - Soroban State Archival Interface](./cap-0046-12.md) introduces
+ the State Archival interface.
### XDR changes
There are four entirely new XDR files:
- - [Stellar-contract.x](../contents/cap-0046/Stellar-contract.x)
- - [Stellar-contract-spec.x](../contents/cap-0046/Stellar-contract-spec.x)
- - [Stellar-contract-meta.x](../contents/cap-0046/Stellar-contract-env-meta.x)
- - [Stellar-contract-env-meta.x](../contents/cap-0046/Stellar-contract-env-meta.x)
- - [Stellar-contract-config-setting.x](../contents/cap-0046/Stellar-contract-config-setting.x)
+- [Stellar-contract.x](../contents/cap-0046/Stellar-contract.x)
+- [Stellar-contract-spec.x](../contents/cap-0046/Stellar-contract-spec.x)
+- [Stellar-contract-meta.x](../contents/cap-0046/Stellar-contract-env-meta.x)
+- [Stellar-contract-env-meta.x](../contents/cap-0046/Stellar-contract-env-meta.x)
+- [Stellar-contract-config-setting.x](../contents/cap-0046/Stellar-contract-config-setting.x)
As well as updates to several of the other XDR files, which are maintained and
modified on an ongoing basis during the development of Soroban in a separate,
@@ -135,14 +137,14 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+++ src/protocol-next/xdr/Stellar-ledger-entries.x 2023-07-14 14:50:55.534242191 -0700
@@ -3,17 +3,16 @@
// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
+
%#include "xdr/Stellar-types.h"
+%#include "xdr/Stellar-contract.h"
+%#include "xdr/Stellar-contract-config-setting.h"
-
+
namespace stellar
{
-
+
-typedef PublicKey AccountID;
typedef opaque Thresholds[4];
typedef string string32<32>;
@@ -152,7 +154,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
-typedef uint64 Duration;
typedef opaque DataValue<64>;
typedef Hash PoolID; // SHA256(LiquidityPoolParameters)
-
+
@@ -98,7 +97,10 @@
OFFER = 2,
DATA = 3,
@@ -163,12 +165,12 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ CONTRACT_CODE = 7,
+ CONFIG_SETTING = 8
};
-
+
struct Signer
@@ -491,6 +493,60 @@
body;
};
-
+
+enum ContractEntryBodyType {
+ DATA_ENTRY = 0,
+ EXPIRATION_EXTENSION = 1
@@ -238,7 +240,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ ConfigSettingEntry configSetting;
}
data;
-
+
@@ -575,6 +637,25 @@
{
PoolID liquidityPoolID;
@@ -263,7 +265,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ ConfigSettingID configSettingID;
+ } configSetting;
};
-
+
// list of all envelope types used in the application
@@ -589,6 +670,8 @@
ENVELOPE_TYPE_SCPVALUE = 4,
@@ -281,10 +283,10 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
@@ -47,13 +47,17 @@
ext;
};
-
+
-const MASK_LEDGER_HEADER_FLAGS = 0x7;
+const MASK_LEDGER_HEADER_FLAGS = 0x7F;
-
+
enum LedgerHeaderFlags
{
DISABLE_LIQUIDITY_POOL_TRADING_FLAG = 0x1,
@@ -296,7 +298,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ DISABLE_CONTRACT_REMOVE = 0x20,
+ DISABLE_CONTRACT_INVOKE = 0x40
};
-
+
struct LedgerHeaderExtensionV1
@@ -122,7 +126,14 @@
LEDGER_UPGRADE_BASE_FEE = 2,
@@ -312,7 +314,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ Hash contractID;
+ Hash contentHash;
};
-
+
union LedgerUpgrade switch (LedgerUpgradeType type)
@@ -137,6 +148,17 @@
uint32 newBaseReserve; // update baseReserve
@@ -330,12 +332,12 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+struct ConfigUpgradeSet {
+ ConfigSettingEntry updatedEntry<>;
};
-
+
/* Entries used to define the bucket list */
@@ -348,6 +370,74 @@
// applied if any
};
-
+
+enum ContractEventType
+{
+ SYSTEM = 0,
@@ -370,7 +372,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ ContractEvent event;
+};
+
-+struct SorobanTransactionMeta
++struct SorobanTransactionMeta
+{
+ ExtensionPoint ext;
+
@@ -393,11 +395,11 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ OperationMeta operations<>; // meta for each operation
+ LedgerEntryChanges txChangesAfter; // tx level changes after operations are
+ // applied if any
-+ SorobanTransactionMeta* sorobanMeta; // Soroban-specific meta (only for
++ SorobanTransactionMeta* sorobanMeta; // Soroban-specific meta (only for
+ // Soroban transactions).
+};
+
-+// This is in Stellar-ledger.x to due to a circular dependency
++// This is in Stellar-ledger.x to due to a circular dependency
+struct InvokeHostFunctionSuccessPreImage
+{
+ SCVal returnValue;
@@ -414,12 +416,12 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+case 3:
+ TransactionMetaV3 v3;
};
-
+
// This struct groups together changes on a per transaction basis
@@ -414,11 +506,46 @@
SCPHistoryEntry scpInfo<>;
};
-
+
+struct LedgerCloseMetaV2
+{
+ // We forgot to add an ExtensionPoint in v1 but at least
@@ -469,13 +471,13 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
@@ -2,11 +2,15 @@
// under the Apache License, Version 2.0. See the COPYING file at the root
// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
+
+%#include "xdr/Stellar-contract.h"
%#include "xdr/Stellar-ledger-entries.h"
-
+
namespace stellar
{
-
+
+// maximum number of operations per transaction
+const MAX_OPS_PER_TX = 100;
+
@@ -492,12 +494,12 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ BUMP_FOOTPRINT_EXPIRATION = 25,
+ RESTORE_FOOTPRINT = 26
};
-
+
/* CreateAccount
@@ -465,6 +472,141 @@
int64 minAmountB; // minimum amount of second asset to withdraw
};
-
+
+enum HostFunctionType
+{
+ HOST_FUNCTION_TYPE_INVOKE_CONTRACT = 0,
@@ -510,7 +512,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ CONTRACT_ID_PREIMAGE_FROM_ADDRESS = 0,
+ CONTRACT_ID_PREIMAGE_FROM_ASSET = 1
+};
-+
++
+union ContractIDPreimage switch (ContractIDPreimageType type)
+{
+case CONTRACT_ID_PREIMAGE_FROM_ADDRESS:
@@ -569,7 +571,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+{
+ SCAddress address;
+ int64 nonce;
-+ uint32 signatureExpirationLedger;
++ uint32 signatureExpirationLedger;
+ SCVal signature;
+};
+
@@ -589,7 +591,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+
+/* Unit of authorization data for Soroban.
+
-+ Represents an authorization for executing the tree of authorized contract
++ Represents an authorization for executing the tree of authorized contract
+ and/or host function calls by the user defined by `credentials`.
+*/
+struct SorobanAuthorizationEntry
@@ -654,7 +656,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
{
AccountID sourceAccount;
- SequenceNumber seqNum;
-+ SequenceNumber seqNum;
++ SequenceNumber seqNum;
uint32 opNum;
PoolID liquidityPoolID;
Asset asset;
@@ -674,12 +676,12 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ SorobanAuthorizedInvocation invocation;
+ } sorobanAuthorization;
};
-
+
enum MemoType
@@ -632,8 +794,40 @@
PreconditionsV2 v2;
};
-
+
-// maximum number of operations per transaction
-const MAX_OPS_PER_TX = 100;
+// Ledger key sets touched by a smart contract transaction.
@@ -692,11 +694,11 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+// Resource limits for a Soroban transaction.
+// The transaction will fail if it exceeds any of these limits.
+struct SorobanResources
-+{
++{
+ // The ledger footprint of the transaction.
+ LedgerFootprint footprint;
+ // The maximum number of instructions this transaction can use
-+ uint32 instructions;
++ uint32 instructions;
+
+ // The maximum number of bytes this transaction can read from ledger
+ uint32 readBytes;
@@ -716,7 +718,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ // Portion of transaction `fee` allocated to refundable fees.
+ int64 refundableFee;
+};
-
+
// TransactionV0 is a transaction with the AccountID discriminant stripped off,
// leaving a raw ed25519 public key to identify the source account. This is used
@@ -695,6 +889,8 @@
@@ -731,7 +733,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
@@ -1588,6 +1784,67 @@
void;
};
-
+
+enum InvokeHostFunctionResultCode
+{
+ // codes considered as "success" for the operation
@@ -818,7 +820,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
+ // declared Soroban resource usage exceeds the network limit
+ txSOROBAN_RESOURCE_LIMIT_EXCEEDED = -17
};
-
+
// InnerTransactionResult must be binary compatible with TransactionResult
@@ -1720,6 +1985,7 @@
case txBAD_SPONSORSHIP:
@@ -842,7 +844,7 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
@@ -14,6 +14,9 @@
typedef unsigned hyper uint64;
typedef hyper int64;
-
+
+typedef uint64 TimePoint;
+typedef uint64 Duration;
+
@@ -851,10 +853,10 @@ diff -ru '--exclude=*.h' '--exclude=.git*' '--exclude=*.md' src/protocol-curr/xd
// extend a structure.
@@ -79,6 +82,7 @@
typedef opaque SignatureHint[4];
-
+
typedef PublicKey NodeID;
+typedef PublicKey AccountID;
-
+
struct Curve25519Secret
{
```
diff --git a/core/cap-0048.md b/core/cap-0048.md
index 289c8e9d9..42ec7674d 100644
--- a/core/cap-0048.md
+++ b/core/cap-0048.md
@@ -19,17 +19,17 @@ Allow smart contracts to interoperate with Stellar assets.
## Motivation
-There is an existing ecosystem of assets on the Stellar Network. Smart contracts
-on the Stellar Network will be significantly less useful if it is not possible
-to interact with those assets. Therefore, we must build an interoperability
-layer.
+There is an existing ecosystem of assets on the Stellar Network. Smart
+contracts on the Stellar Network will be significantly less useful if it is not
+possible to interact with those assets. Therefore, we must build an
+interoperability layer.
### Requirements
#### Performance
-Tokens are the basic unit of blockchain applications, and as such they should be
-very efficient to use.
+Tokens are the basic unit of blockchain applications, and as such they should
+be very efficient to use.
#### Trust Transitivity
@@ -64,13 +64,14 @@ This CAP is aligned with the following Stellar Network Goals:
## Abstract
-This proposal provides an ERC-20 compliant interface (excluding `totalSupply()`)
-for Stellar assets. This is achieved by introducing a new type of ledger entry,
-`AllowanceEntry`, which is used to record ERC-20 style allowances. Atop this,
-a `ContractID` for cross-contract invocations is introduced which can refer to
-an actual smart contract or to native contracts. In this case, we provide a
-new type of native contract with type `ASSET_ADAPTOR` which implements the
-ERC-20 interface in terms of accounts, trustlines, and allowances.
+This proposal provides an ERC-20 compliant interface (excluding
+`totalSupply()`) for Stellar assets. This is achieved by introducing a new type
+of ledger entry, `AllowanceEntry`, which is used to record ERC-20 style
+allowances. Atop this, a `ContractID` for cross-contract invocations is
+introduced which can refer to an actual smart contract or to native contracts.
+In this case, we provide a new type of native contract with type
+`ASSET_ADAPTOR` which implements the ERC-20 interface in terms of accounts,
+trustlines, and allowances.
## Specification
@@ -89,12 +90,12 @@ index 377309f9..0337128b 100644
+ CONTRACT_DATA = 7,
+ ALLOWANCE = 8
};
-
+
struct Signer
@@ -499,6 +500,21 @@ struct ContractDataEntry {
SCVal *val;
};
-
+
+struct AllowanceEntry
+{
+ union switch (int v)
@@ -121,7 +122,7 @@ index 377309f9..0337128b 100644
+ AllowanceEntry allowance;
}
data;
-
+
diff --git a/src/xdr/Stellar-transaction.x b/src/xdr/Stellar-transaction.x
index 3d9ee3ea..fa6297a7 100644
--- a/src/xdr/Stellar-transaction.x
@@ -129,7 +130,7 @@ index 3d9ee3ea..fa6297a7 100644
@@ -14,6 +14,20 @@ case LIQUIDITY_POOL_CONSTANT_PRODUCT:
LiquidityPoolConstantProductParameters constantProduct;
};
-
+
+enum ContractType
+{
+ SMART_CONTRACT = 0,
@@ -154,8 +155,8 @@ index 3d9ee3ea..fa6297a7 100644
#### Asset Adaptors
Asset adaptors implement the ERC-20 interface in terms of accounts, trustlines,
-and allowances. They allow smart contracts that can interoperate with ERC-20 tokens
-to interoperate with Stellar assets.
+and allowances. They allow smart contracts that can interoperate with ERC-20
+tokens to interoperate with Stellar assets.
Specifically, asset adaptors implement the following
@@ -189,7 +190,7 @@ fn balanceOf(owner: AccountID) -> uint256;
// Returns PaymentResultCode == PAYMENT_SUCCESS
fn transfer(to: AccountID, value: uint256) -> bool;
-// Loads a = allowance(from, msg.sender)
+// Loads a = allowance(from, msg.sender)
// Returns false if a < amount
// Sets ApprovalEntry {
// transferFrom: msg.sender,
@@ -225,8 +226,8 @@ Note that `totalSupply()` is _not_ implemented.
Asset adaptors don't have normal contract addresses, because they always exist
and have no on-chain representation. Instead, an asset adaptor contract address
-is simply identified by a `ContractID` of type `ASSET_ADAPTOR` that contains the
-specified asset. We will need a host function like
+is simply identified by a `ContractID` of type `ASSET_ADAPTOR` that contains
+the specified asset. We will need a host function like
```rust
fn get_asset_adaptor(asset: Asset) -> ContractID;
@@ -258,11 +259,11 @@ nonetheless.
### Issue: Issuer Balance
The issuer of a non-native asset doesn't have a balance of the asset in the
-traditional sense. When the issuer sends a payment, it mints the asset. When the
-issuer receives a payment, it burns the asset.
+traditional sense. When the issuer sends a payment, it mints the asset. When
+the issuer receives a payment, it burns the asset.
-What should `balance(issuer)` return? Should `transfer()` mint when `msg.sender`
-is the issuer?
+What should `balance(issuer)` return? Should `transfer()` mint when
+`msg.sender` is the issuer?
### Issue: Not Compatible With High Supply Assets
@@ -274,8 +275,8 @@ reserves were `INT64_MAX` of each, then the contract would be frozen with only
withdrawals possible. This occurs because the contract trustlines can't hold
more than `INT64_MAX`.
-One potential option to alleviate this issue would be to support trustlines with
-higher limits.
+One potential option to alleviate this issue would be to support trustlines
+with higher limits.
### Issue: Classic Payments To Contracts
diff --git a/core/cap-0049.md b/core/cap-0049.md
index aecf84d20..446deaee8 100644
--- a/core/cap-0049.md
+++ b/core/cap-0049.md
@@ -17,17 +17,17 @@ Allow smart contracts to interoperate with Stellar assets.
## Motivation
-There is an existing ecosystem of assets on the Stellar Network. Smart contracts
-on the Stellar Network will be significantly less useful if it is not possible
-to interact with those assets. Therefore, we must build an interoperability
-layer.
+There is an existing ecosystem of assets on the Stellar Network. Smart
+contracts on the Stellar Network will be significantly less useful if it is not
+possible to interact with those assets. Therefore, we must build an
+interoperability layer.
### Requirements
#### Performance
-Tokens are the basic unit of blockchain applications, and as such they should be
-very efficient to use.
+Tokens are the basic unit of blockchain applications, and as such they should
+be very efficient to use.
#### Trust Transitivity
@@ -90,12 +90,12 @@ index 377309f9..f4847eb5 100644
+ WRAPPED_BALANCE = 8,
+ WRAPPED_ALLOWANCE = 9
};
-
+
struct Signer
@@ -499,6 +501,36 @@ struct ContractDataEntry {
SCVal *val;
};
-
+
+struct WrappedBalanceEntry
+{
+ union switch (int v)
@@ -139,7 +139,7 @@ index 377309f9..f4847eb5 100644
+ WrappedAllowanceEntry wrappedAllowance;
}
data;
-
+
@@ -600,6 +636,21 @@ case CONTRACT_DATA:
int64 contractID;
SCVal *key;
@@ -160,7 +160,7 @@ index 377309f9..f4847eb5 100644
+ Asset asset;
+ } wrappedAllowance;
};
-
+
// list of all envelope types used in the application
diff --git a/src/xdr/Stellar-transaction.x b/src/xdr/Stellar-transaction.x
index 3d9ee3ea..fa6297a7 100644
@@ -169,7 +169,7 @@ index 3d9ee3ea..fa6297a7 100644
@@ -14,6 +14,20 @@ case LIQUIDITY_POOL_CONSTANT_PRODUCT:
LiquidityPoolConstantProductParameters constantProduct;
};
-
+
+enum ContractType
+{
+ SMART_CONTRACT = 0,
@@ -192,8 +192,8 @@ index 3d9ee3ea..fa6297a7 100644
### Semantics
Asset adaptors implement the ERC-20 interface, authorization/clawback, and
-wrap/unwrap. They allow smart contracts that can interoperate with ERC-20 tokens
-to interoperate with Stellar assets.
+wrap/unwrap. They allow smart contracts that can interoperate with ERC-20
+tokens to interoperate with Stellar assets.
Specifically, asset adaptors implement the following
@@ -315,8 +315,8 @@ fn unwrap(value: uint256) -> bool;
Asset adaptors don't have normal contract addresses, because they always exist
and have no on-chain representation. Instead, an asset adaptor contract address
-is simply identified by a `ContractID` of type `ASSET_ADAPTOR` that contains the
-specified asset. We will need a host function like
+is simply identified by a `ContractID` of type `ASSET_ADAPTOR` that contains
+the specified asset. We will need a host function like
```rust
fn get_asset_adaptor(asset: Asset) -> ContractID;
@@ -338,8 +338,8 @@ without requiring trust for any other party.
An account will have to call `wrap` directly in order to prepare assets from
their trustline for use in smart contracts, and these functions can be trusted
-because they are part of the protocol. Once assets are wrapped, they are subject
-to the ERC-20 allowance mechanism to protect them.
+because they are part of the protocol. Once assets are wrapped, they are
+subject to the ERC-20 allowance mechanism to protect them.
### Wrap/Unwrap Friction Is Acceptable
@@ -351,8 +351,8 @@ The core assumption here is that accounts are not doing a constant mixture of
Stellar operations and smart contract interactions. I anticipate that usage is
much more likely to go in phases: prepare Stellar asset for utilization in a
smart contract, leave it there for some time possibly moving it to other smart
-contracts, bring Stellar asset back. This usage pattern is reasonable given that
-most smart contracts take control of the asset anyway, requiring it to be
+contracts, bring Stellar asset back. This usage pattern is reasonable given
+that most smart contracts take control of the asset anyway, requiring it to be
withdrawn before it can be used elsewhere.
### Only Two Authorization States
diff --git a/core/cap-0050.md b/core/cap-0050.md
index ff8a683bf..cb7d07bff 100644
--- a/core/cap-0050.md
+++ b/core/cap-0050.md
@@ -24,7 +24,7 @@ correspond to either an externally owned account or a contract. For example,
- the ERC-20 `transfer` function sends funds from `msg.sender` to some other
address
- the ERC-20 `approve` function sets the allowance for `spender` to transfer
-funds from `msg.sender`
+ funds from `msg.sender`
- the ERC-20 `transferFrom` function sends funds from some address `A` to
address `B` if `msg.sender` has sufficient allowance
@@ -61,14 +61,14 @@ This CAP is aligned with the following Stellar Network Goals:
## Abstract
This proposal introduces `InvokeContractTransaction` and
-`InvokeContractTransactionEnvelope` which permit interacting with contracts.
-To maximize usability, a `InvokeContractTransactionEnvelope` takes an
-additional `KeyedSignature` which identifies who has invoked the contract
-(analogous to `msg.sender` in Ethereum). The proposal also introduces a new
-host function, `get_invoker`, so a contract can identify who has invoked the
-contract. The proposal also contains a number of extensions to support
-presigned messages, which would be relevant for an EIP-2612 style `permit`
-function, and the ability to run Wasm directly in a transaction.
+`InvokeContractTransactionEnvelope` which permit interacting with contracts. To
+maximize usability, an `InvokeContractTransactionEnvelope` takes an additional
+`KeyedSignature` which identifies who has invoked the contract (analogous to
+`msg.sender` in Ethereum). The proposal also introduces a new host function,
+`get_invoker`, so a contract can identify who has invoked the contract. The
+proposal also contains a number of extensions to support presigned messages,
+which would be relevant for an EIP-2612 style `permit` function, and the
+ability to run Wasm directly in a transaction.
## Specification
@@ -96,7 +96,7 @@ index 3d9ee3ea..20c3eaae 100644
@@ -20,6 +20,38 @@ struct DecoratedSignature
Signature signature; // actual signature
};
-
+
+enum KeyedSignatureType
+{
+ KEYED_SIGNATURE_NONE = 0,
@@ -135,7 +135,7 @@ index 3d9ee3ea..20c3eaae 100644
@@ -709,6 +741,68 @@ struct FeeBumpTransactionEnvelope
DecoratedSignature signatures<20>;
};
-
+
+struct ReadWriteSet
+{
+ // Keys that can be read but not written
@@ -208,7 +208,7 @@ index 3d9ee3ea..20c3eaae 100644
+case ENVELOPE_TYPE_INVOKE_CONTRACT_TX:
+ InvokeContractTransactionEnvelope invoke;
};
-
+
struct TransactionSignaturePayload
@@ -730,6 +826,10 @@ struct TransactionSignaturePayload
Transaction tx;
@@ -233,8 +233,8 @@ overlay messages and transaction queue work without modification.
#### Message Sender
-Whenever a contract is executing, the contract can determine the identity of the
-caller by invoking the host function
+Whenever a contract is executing, the contract can determine the identity of
+the caller by invoking the host function
```rust
fn get_invoker() -> Address;
@@ -254,9 +254,9 @@ To interact with a contract, a user will submit an
- `env.tx.symbol` is the function you want to call on the contract
- `env.tx.parameters` are the parameters you want to pass to the function
- `env.signatures` are valid signatures for signers of `env.tx.sourceAccount`
-with total weight exceeding `lowThreshold(env.tx.sourceAccount)`
+ with total weight exceeding `lowThreshold(env.tx.sourceAccount)`
- `env.invokerSignature.signature` is a valid signature for
-`env.invokerSignature.key`
+ `env.invokerSignature.key`
Immediately after the contract begins execution, a call to `get_invoker()` will
return `env.invokerSignature.key`.
@@ -280,10 +280,10 @@ return the `ContractID` for the calling contract.
### Why is Invoker Signature Over the Entire Transaction?
It has been suggested that the invoker signature should only be over the
-invocation details (contract identifier, symbol, and parameters) rather than the
-entire transaction (source account, sequence number, fee, invocation details).
-The argument is that contracts should implement their own replay prevention
-mechanisms. This argument does not hold up to closer scruity.
+invocation details (contract identifier, symbol, and parameters) rather than
+the entire transaction (source account, sequence number, fee, invocation
+details). The argument is that contracts should implement their own replay
+prevention mechanisms. This argument does not hold up to closer scrutiny.
We will examine some mechanisms for moving replay prevention into contracts to
determine their advantages and disadvantages. It will show that the proposed
@@ -372,16 +372,15 @@ In both cases, the called contract must check the signature. Why? Because
`get_invoker()` will return the proxy address, not the invoker address
specified at the transaction level. It follows that the called contract must be
able to identify and authenticate the invoker through some other mechanism. We
-will show below (see "Why Shouldn't Contracts do Their Own Signature Checking?")
-that requiring signature verification at the contract level is not a good
-pattern in general.
+will show below (see "Why Shouldn't Contracts do Their Own Signature
+Checking?") that requiring signature verification at the contract level is not
+a good pattern in general.
-One might argue that we could provide a host function that allows executing
-a cross-contract call in the name of the signer, meaning that immediately
-after control transfers to the called contract `get_invoker()` will return
-the signer rather than the caller. While this is possible, it doesn't solve
-the problem here. Why not? Because that call would itself require replay
-prevention!
+One might argue that we could provide a host function that allows executing a
+cross-contract call in the name of the signer, meaning that immediately after
+control transfers to the called contract `get_invoker()` will return the signer
+rather than the caller. While this is possible, it doesn't solve the problem
+here. Why not? Because that call would itself require replay prevention!
### Why Shouldn't Contracts do Their Own Signature Checking?
@@ -426,15 +425,15 @@ fn contract_transfer(to: Address, amount: u256) -> bool;
fn contract_transfer_from(from: Address, to: Address, amount: u256) -> bool;
```
-that first check `get_invoker()` is a contract, then take actions in the name of
-that contract. But such a function would just be the classic implementation with
-additional restrictions.
+that first check `get_invoker()` is a contract, then take actions in the name
+of that contract. But such a function would just be the classic implementation
+with additional restrictions.
A simpler solution is to always require an invoker signature. This preserves
the ability to use `get_invoker()` for identity everywhere without concern for
authorization. Contracts that desire an alternative signature scheme can always
-elect to ignore `get_invoker()`, meaning users can simply sign transactions with
-any arbitrary key.
+elect to ignore `get_invoker()`, meaning users can simply sign transactions
+with any arbitrary key.
When combined with the fact that the simpler approach also conveys replay
prevention automatically, this approach should be preferred.
@@ -453,9 +452,9 @@ If a contract wants to extend this mechanism generally, it can
- ignore the invoker signature (transactions can include a signature from any
key or use `KEYED_SIGNATURE_NONE`)
-- check signatures on every function (see "Why Shouldn't Contracts do Their
- Own Signature Checking?" for how one might handle this if contracts need to
- be supported)
+- check signatures on every function (see "Why Shouldn't Contracts do Their Own
+ Signature Checking?" for how one might handle this if contracts need to be
+ supported)
- perform replay protection on the data that was signed
Patterns like this can be used to implement complicated protocols like payment
@@ -486,13 +485,14 @@ ed25519).
### Message Sender Does Not Acquire Signing Authority Over a Stellar Account
-This proposal logically separates the notion of a Stellar account and an address
-which controls assets in a smart contract. Specifically, control over a Stellar
-account with a given public key is _not_ conferred by control over assets in a
-smart contract with the same public key. For example, a Stellar account might
-have its master weight set to 0 while its public key grants control over assets
-in a smart contract. Allowing the message sender to control that corresponding
-Stellar account would, therefore, be a catastrophic security failure.
+This proposal logically separates the notion of a Stellar account and an
+address which controls assets in a smart contract. Specifically, control over a
+Stellar account with a given public key is _not_ conferred by control over
+assets in a smart contract with the same public key. For example, a Stellar
+account might have its master weight set to 0 while its public key grants
+control over assets in a smart contract. Allowing the message sender to control
+that corresponding Stellar account would, therefore, be a catastrophic security
+failure.
### Incompatible with CAP-0048
@@ -562,8 +562,8 @@ While this proposal does not need to support presigned messages, it is
instructive to consider how they fit into this model. The key requirements for
presigned messages are
-- domain separation: a presigned message should not collide with any transaction
-or with any other presigned messages
+- domain separation: a presigned message should not collide with any
+ transaction or with any other presigned messages
- usability: a presigned message should be easy to prepare and verify
- performance: a presigned message should be efficient to prepare and verify
@@ -621,8 +621,8 @@ The host interface can also support an efficient hash on `SCVal`.
### Extension: More Powerful Contract Interactions
It could be desirable to allow a transaction to do more than just invoke a
-specific function on a contract. This could be permitted by adding the following
-XDR
+specific function on a contract. This could be permitted by adding the
+following XDR
```
struct RunWasmTransaction
@@ -703,9 +703,9 @@ with the following properties:
- `env.tx.symbol` is the function you want to call in the Wasm module
- `env.tx.parameters` are the parameters you want to pass to the function
- `env.signatures` are valid signatures for signers of `env.tx.sourceAccount`
-with total weight exceeding `lowThreshold(env.tx.sourceAccount)`
+ with total weight exceeding `lowThreshold(env.tx.sourceAccount)`
- `env.invokerSignature.signature` is a valid signature for
-`env.invokerSignature.key`
+ `env.invokerSignature.key`
Immediately after the Wasm begins execution, a call to `get_invoker()` will
return `env.invokerSignature.key`.
diff --git a/core/cap-0051.md b/core/cap-0051.md
index 790f17cfb..a98345cd5 100644
--- a/core/cap-0051.md
+++ b/core/cap-0051.md
@@ -19,14 +19,14 @@ interface.
## Motivation
Secp256r1, also sometimes referenced as prime256r1, p256, or ES256, is a common
-elliptic curve used with ECDSA. It's one of the common signature algorithms used
-in [Webauthn], which is the standard behind passkeys available on, browsers,
-computers, and phones. Supporting secp256r1 natively would allow for the
-execution of efficient webauthn implementations where-by browsers, computers,
-and phones could be the signers of accounts on Stellar via Soroban's custom
-account interface. While it is possible to embed ECDSA secp256r1 verification
-into contracts on the guest side, the instruction cost of ECDSA verification is
-greater than the networks current maximum limits.
+elliptic curve used with ECDSA. It's one of the common signature algorithms
+used in [Webauthn], which is the standard behind passkeys available on
+browsers, computers, and phones. Supporting secp256r1 natively would allow for
+the execution of efficient webauthn implementations whereby browsers,
+computers, and phones could be the signers of accounts on Stellar via Soroban's
+custom account interface. While it is possible to embed ECDSA secp256r1
+verification into contracts on the guest side, the instruction cost of ECDSA
+verification is greater than the network's current maximum limits.
### Goals Alignment
@@ -46,9 +46,10 @@ for a public key.
A new function `verify_sig_ecdsa_secp256r1` with export name `3` in module `c`
is added to the Soroban environment's exported interface.
-It accepts a public key, message, and signature, and verifies that the signature
-was produced using the ECDSA signing algorithm with the provided message and the
-private key on the secp256r1 curve corresponding to the provided public key.
+It accepts a public key, message, and signature, and verifies that the
+signature was produced using the ECDSA signing algorithm with the provided
+message and the private key on the secp256r1 curve corresponding to the
+provided public key.
It returns if the signature is verified, and traps if verification fails.
@@ -56,8 +57,8 @@ The `public_key` parameters is a `BytesObject` that must have a length of
65-bytes, being the ECDSA secp256r1 public key SEC-1 encoded.
The `msg_digest` parameters is a `BytesObject` that must be a hash of the
-message, and must have been produced using a secure cryptographic hash function,
-otherwise an attacker can potentially forge signatures.
+message, and must have been produced using a secure cryptographic hash
+function, otherwise an attacker can potentially forge signatures.
The `signature` parameters is a `BytesObject` that must have a length of
64-bytes, with the first 32-bytes being the R-value big endian integer, and the
@@ -103,9 +104,14 @@ index df7d5c4..d1d7760 100644
#### Verify vs Recovery
-The interface of the function is different to the existing ECDSA secp256k1 interface because the latter is a recovery interface that recovers a public key given a message, signature, and a recovery ID.
+The interface of the function is different to the existing ECDSA secp256k1
+interface because the latter is a recovery interface that recovers a public key
+given a message, signature, and a recovery ID.
-The secp256r1 interface is proposed as a verification interface accepting a public key, message, and signature, because the majority of existing signing software and hardware that support ECDSA secp256r1 do not provide the recovery ID.
+The secp256r1 interface is proposed as a verification interface accepting a
+public key, message, and signature, because the majority of existing signing
+software and hardware that support ECDSA secp256r1 do not provide the recovery
+ID.
To support a recovery interface for secp256r1 the Soroban environment could
perform two recoveries, one with recovery ID 0 and again with recovery ID 1, to
@@ -122,12 +128,12 @@ systems, such as browsers or phones implementing webauthn/passkeys.
A motivation to use the alternative recovery interfaces is that it will likely
that contract that use secp256r1 verification will store a public key on chain,
or communicate the public key in invocation parameters. In both cases a
-verification interface will require transmission and storage of the full 65-byte
-public key. A recovery interface would allow for hashing and truncating the
-transmitted or stored public key to 32-bytes if hashed with SHA-256, or 20-bytes
-if hashed and truncated similar to how Ethereum addresses are produced. This
-space reduction would lower the read/write resource usages as well as
-transactions size, for an increase in instruction cost.
+verification interface will require transmission and storage of the full
+65-byte public key. A recovery interface would allow for hashing and truncating
+the transmitted or stored public key to 32-bytes if hashed with SHA-256, or
+20-bytes if hashed and truncated similar to how Ethereum addresses are
+produced. This space reduction would lower the read/write resource usages as
+well as transactions size, for an increase in instruction cost.
#### SEC1 Encoding of Public Key
@@ -135,7 +141,8 @@ The proposal specifies the public key to be SEC1 encoded. This encoding is
65-bytes, containing the point coordinates uncompressed, and is the same
encoding used by the existing `recover_key_ecdsa_secp256k1` function.
-Most applications using secp256r1 are likely to produce a 72-byte ASN.1 DER encoded public key, which is convertible.
+Most applications using secp256r1 are likely to produce a 72-byte ASN.1 DER
+encoded public key, which is convertible.
#### Compact Encoding of Signature
@@ -143,18 +150,24 @@ The proposal specifies the signature to be encoded as two 32-bit big endian
integers. This encoding is 64-bytes and is the same encoding used by the
existing `recover_key_ecdsa_secp256k1` function.
-Most applications using secp256r1 are likely to produce a 70 to 73-byte ASN.1 DER encoded signature, which is convertible.
+Most applications using secp256r1 are likely to produce a 70 to 73-byte ASN.1
+DER encoded signature, which is convertible.
#### Webauthn / Passkeys
-The secp256r1 verify interface is largely motivated by the webauthn use case. Webauthn involves an application registering with a client, that produces a private key, and provides the public key back to the application. The application can then engage the client to sign data that the application can verify using the public key shared earlier.
+The secp256r1 verify interface is largely motivated by the webauthn use case.
+Webauthn involves an application registering with a client, that produces a
+private key, and provides the public key back to the application. The
+application can then engage the client to sign data that the application can
+verify using the public key shared earlier.
The client may be a browser, phone, secret manager, or other software.
The application could be a dapp, application, browser extension, or any other
software.
-The public key could be an EdDSA (ed25519), ES256 (ECDSA secp256r1), PS256, or RS256, where the last two are both RSA signing algorithms.
+The public key could be an EdDSA (ed25519), ES256 (ECDSA secp256r1), PS256, or
+RS256, where the last two are both RSA signing algorithms.
In all signing algorithms the payload to sign is produced by concatenating the
webauthn authenticator data, and a SHA-256 hash of the client data JSON. The
@@ -164,27 +177,30 @@ application is base64 url encoded in the `challenge` field of the client data
JSON.
For example, a client data JSON:
+
```json
{
- "type":"webauthn.get",
- "challenge":"hJHFvaaoU7qkcH9kML46shLL_btpYGCA6ty3ie0M1Qw",
- "origin":"http://localhost:4507",
- "crossOrigin":false
+ "type": "webauthn.get",
+ "challenge": "hJHFvaaoU7qkcH9kML46shLL_btpYGCA6ty3ie0M1Qw",
+ "origin": "http://localhost:4507",
+ "crossOrigin": false
}
```
-For Stellar transactions intended to be authenticated by a webauthn
-signature in a Soroban custom account, this challenge can be the SHA-256 hash of
-the `HashIDPreimage` `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION`.
+For Stellar transactions intended to be authenticated by a webauthn signature
+in a Soroban custom account, this challenge can be the SHA-256 hash of the
+`HashIDPreimage` `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION`.
-In the ed25519 algorithm the payload as-is is passed to the signature verification function.
+In the ed25519 algorithm the payload as-is is passed to the signature
+verification function.
-In the ECDSA secp256r1 algorithm the payload is hashed using SHA-256, and the hash is passed to the verification function.
+In the ECDSA secp256r1 algorithm the payload is hashed using SHA-256, and the
+hash is passed to the verification function.
It could be argued that adding secp256r1 signature verification is insufficient
to implement webauthn signature verification in a contract because verification
-also requires JSON and base64 url support, neither of which are supported by the
-Soroban host functions.
+also requires JSON and base64 url support, neither of which are supported by
+the Soroban host functions.
There are some aspects of Webauthn that are discussed below by the proposal.
Even though the concerns are not critical concerns of the proposal of secp256r1
@@ -195,8 +211,8 @@ Webauthn contract on Soroban.
##### Base64 URL Encoding
It is possible to embed a small and efficient fixed width base64 url encoder
-into a contract. Therefore the lack of native base64 URL encoding in the Soroban
-environmen host interface is not a limiting factor. See
+into a contract. Therefore the lack of native base64 URL encoding in the
+Soroban environment host interface is not a limiting factor. See
[leighmcculloch/soroban-base64].
##### JSON
@@ -210,8 +226,8 @@ Also, it is reasonable to take the position that the limited verification of
client data JSON, which is discussed at length in the specification, means that
a fully fledged JSON parser is not required.
-The specification says Relying Parties (RP) should handle key reordering and new
-values being introduced. But the specification also goes to great length to
+The specification says Relying Parties (RP) should handle key reordering and
+new values being introduced. But the specification also goes to great length to
detail how the client data json is a subset of JSON, and a limited resource
parser can be written so as to verify the client data JSON.
@@ -246,11 +262,11 @@ that there exists a cloned authenticator by seeing the counter go backwards.
It's worth noting that an RP wouldn't be able to identify which authenticator
was the cloned authenticator, and only that one existed.
-If a contract detected this scenario it wouldn't be able to take any independent
-action such as locking the account, as it would have no way to identify which
-authenticator was cloned. A contract might have a backup credential that could
-be used to unlock it in this situation, but that is out-of-scope of this
-proposal.
+If a contract detected this scenario it wouldn't be able to take any
+independent action such as locking the account, as it would have no way to
+identify which authenticator was cloned. A contract might have a backup
+credential that could be used to unlock it in this situation, but that is
+out-of-scope of this proposal.
## Protocol Upgrade Transition
@@ -267,7 +283,11 @@ must be appropriately metered.
#### Storage
-The use of a verify interface rather than a recovery interface requires that the 65-byte public key be stored or transmitted into the host for verification. It could be reasonable to present, or later add, a recovery interface that could more efficiently work with a truncated hash of the public key. See the Design Rationale section for more details.
+The use of a verify interface rather than a recovery interface requires that
+the 65-byte public key be stored or transmitted into the host for verification.
+It could be reasonable to present, or later add, a recovery interface that
+could more efficiently work with a truncated hash of the public key. See the
+Design Rationale section for more details.
## Security
@@ -285,9 +305,9 @@ reasons, such as resource cost or privacy.
### Audited Implementations
The only ECDSA secp256r1 pure-Rust crates that the Soroban environment could
-embed are the [p256] and [ecdsa] crates. At the date that this proposal has been
-written both crates have a warning in their readme indicating that they have
-never been independently audited.
+embed are the [p256] and [ecdsa] crates. At the date that this proposal has
+been written both crates have a warning in their readme indicating that they
+have never been independently audited.
## Test Cases
@@ -300,6 +320,8 @@ None yet. But will be tracked by [stellar/rs-sorovan-env#807] if implemented.
[Webauthn]: https://www.w3.org/TR/webauthn-2/
[p256]: https://crates.io/crates/p256
[ecdsa]: https://crates.io/crates/ecdsa
-[stellar/rs-soroban-env#807]: https://github.com/stellar/rs-soroban-env/issues/807
-[leighmcculloch/soroban-base64]: https://github.com/leighmcculloch/soroban-base64
+[stellar/rs-soroban-env#807]:
+ https://github.com/stellar/rs-soroban-env/issues/807
+[leighmcculloch/soroban-base64]:
+ https://github.com/leighmcculloch/soroban-base64
[leighmcculloch/soroban-json]: https://github.com/leighmcculloch/soroban-json
diff --git a/core/cap-0052.md b/core/cap-0052.md
index 532c5a17d..3f5dda2e1 100644
--- a/core/cap-0052.md
+++ b/core/cap-0052.md
@@ -44,9 +44,9 @@ This CAP is aligned with the following Stellar Network Goals:
## Abstract
-This proposal adds two functions to the Soroban environment's exported interface
-that accepts the parameters for configuring a variant of base64, and encode and
-decode bytes to and from base64 as specified in [RFC4648].
+This proposal adds two functions to the Soroban environment's exported
+interface that accepts the parameters for configuring a variant of base64, and
+encode and decode bytes to and from base64 as specified in [RFC4648].
## Specification
@@ -54,20 +54,21 @@ A new function `base64_encode` with export name `n` in module `b` is added to
the Soroban environment's exported interface. It accepts an `alphabet`,
`padding`, and returns the bytes encoded in base64.
-Another new function `base64_decode` with export name `o` in module `b` is added
-to the Soroban environment's exported interface. It accepts an `alphabet`,
-`padding`, and returns the bytes decoded. If the decode fails for any reason it
-returns an `ScError`.
+Another new function `base64_decode` with export name `o` in module `b` is
+added to the Soroban environment's exported interface. It accepts an
+`alphabet`, `padding`, and returns the bytes decoded. If the decode fails for
+any reason it returns an `ScError`.
For both functions the parameters behave the same.
-Parameter `alphabet` specifies the alphabet used by the encoder and decoder. The
-symbols `std` or `url` are accepted, where they map respectively to the standard
-alphabet and the URL and filename friendly alphabet as specified in [RFC4648].
+Parameter `alphabet` specifies the alphabet used by the encoder and decoder.
+The symbols `std` or `url` are accepted, where they map respectively to the
+standard alphabet and the URL and filename friendly alphabet as specified in
+[RFC4648].
-Parameter `padding` specifies the padding character with the same limitations on
-valid values as specified in [RFC4648]. The padding character is 8-bit, and is
-stored in the lower 8-bits of the U32Val type. If the padding value of
+Parameter `padding` specifies the padding character with the same limitations
+on valid values as specified in [RFC4648]. The padding character is 8-bit, and
+is stored in the lower 8-bits of the U32Val type. If the padding value of
0xffffffff is used it indicates no padding is to be used when encoding, and no
padding is required when decoding, but padding is permitted, unless strict is
specified.
@@ -138,15 +139,15 @@ required would cause an error.
#### Alphabet
The alphabet's included are commonly used. Standard base64 encoding is most
-commonly used in applications. Base64 url encoding is less commonly used, but is
-used in the webauthn standard.
+commonly used in applications. Base64 url encoding is less commonly used, but
+is used in the webauthn standard.
-No custom alphabets are supported because their use is rare, however support for
-custom alphabets could be added in the future by allowing for a 64 byte String
-or Bytes value to be passed as the alphabet instead of a Symbol. The use of
-Symbol to signal `std` or `url` is compact and efficient for known alphabets and
-makes those selections distinct from the provision of a custom String alphabet
-in the future.
+No custom alphabets are supported because their use is rare, however support
+for custom alphabets could be added in the future by allowing for a 64 byte
+String or Bytes value to be passed as the alphabet instead of a Symbol. The use
+of Symbol to signal `std` or `url` is compact and efficient for known alphabets
+and makes those selections distinct from the provision of a custom String
+alphabet in the future.
#### Webauthn / Passkeys
@@ -163,12 +164,13 @@ to encode the value it expects to compare with the encoded value found in the
JSON message.
For example, a client data JSON:
+
```json
{
- "type":"webauthn.get",
- "challenge":"hJHFvaaoU7qkcH9kML46shLL_btpYGCA6ty3ie0M1Qw",
- "origin":"http://localhost:4507",
- "crossOrigin":false
+ "type": "webauthn.get",
+ "challenge": "hJHFvaaoU7qkcH9kML46shLL_btpYGCA6ty3ie0M1Qw",
+ "origin": "http://localhost:4507",
+ "crossOrigin": false
}
```
diff --git a/core/cap-0053.md b/core/cap-0053.md
index 3a78a2b1f..c91add8dd 100644
--- a/core/cap-0053.md
+++ b/core/cap-0053.md
@@ -15,11 +15,13 @@ Protocol version: 21
## Simple Summary
-Allow extending the Time To Live (TTL) for contract instance and contract code with separate Soroban smart contract host functions.
+Allow extending the Time To Live (TTL) for contract instance and contract code
+with separate Soroban smart contract host functions.
## Working Group
-This change was authored by Tommaso De Ponti, with input from the consulted individuals mentioned at the top of this document.
+This change was authored by Tommaso De Ponti, with input from the consulted
+individuals mentioned at the top of this document.
#### Semantic protocol changes
@@ -27,30 +29,49 @@ Adding two Soroban smart contract host functions.
## Motivation
-Currently, Soroban smart contract system has a host function, `extend_contract_instance_and_code_ttl`, that extends the [TTL](cap-0046-12.md) of both contract instance and contract code ledger entries by the same value. In decentralized contracts, the contract can extend its own lifetime from within the code with certain thresholds with the idea that the cost is distributed among its users.
-
-Extending the TTL for contract code entries are very expensive due to the large binary sizes that occupy the ledger. There are numerous situations where a contract code entry is referenced by multiple contract instances. Thus allowing to extend them separately would enable implementing a more efficient lifetime extension logic.
-
-For example, a liquidity pool contract can be used by thousands of actively bumped contract instances. The contract instance of a single pool contract can be bumped by the users of that contract, but the contract code entry can be bumped by the users of all the pool contracts. So, when extending the lifetime of a contract instance, extending the lifetime of contract code separately and slightly less would make up for a better distribution of the fees across the network.
+Currently, the Soroban smart contract system has a host function,
+`extend_contract_instance_and_code_ttl`, that extends the [TTL](cap-0046-12.md)
+of both contract instance and contract code ledger entries by the same value.
+In decentralized contracts, the contract can extend its own lifetime from
+within the code with certain thresholds with the idea that the cost is
+distributed among its users.
+
+Extending the TTL for contract code entries is very expensive due to the large
+binary sizes that occupy the ledger. There are numerous situations where a
+contract code entry is referenced by multiple contract instances. Thus allowing
+to extend them separately would enable implementing a more efficient lifetime
+extension logic.
+
+For example, a liquidity pool contract can be used by thousands of actively
+bumped contract instances. The contract instance of a single pool contract can
+be bumped by the users of that contract, but the contract code entry can be
+bumped by the users of all the pool contracts. So, when extending the lifetime
+of a contract instance, extending the lifetime of contract code separately and
+slightly less would make up for a better distribution of the fees across the
+network.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects to create highly
- usable products.
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products.
## Abstract
-This CAP introduces these two new Soroban smart contract host functions to bump the TTL of contract code and instance:
+This CAP introduces these two new Soroban smart contract host functions to bump
+the TTL of contract code and instance:
+
1. `extend_contract_instance_ttl` to extend the contract instance's TTL
2. `extend_contract_code_ttl` to extend the contract code's TTL
## Specification
-Two new functions, `extend_contract_instance_ttl` and `extend_contract_code_ttl`, with export names `c` and `d` in module `l` are added to the Soroban environment's exported interface.
+Two new functions, `extend_contract_instance_ttl` and
+`extend_contract_code_ttl`, with export names `c` and `d` in module `l` are
+added to the Soroban environment's exported interface.
-They both accept a contract, threshold, and extend_to as input arguments. The
+They both accept a contract, threshold, and extend_to as input arguments. The
functions extend the TTL and don't return anything.
The `contract` parameter is an `AddressObject` that is the contract's address.
@@ -58,6 +79,7 @@ The `contract` parameter is an `AddressObject` that is the contract's address.
The `threshold` and `extend_to` parameters are `U32Val` type.
The `env.json` in `rs-soroban-env` will be modified as so:
+
```
{
"export": "c",
@@ -105,8 +127,13 @@ The `env.json` in `rs-soroban-env` will be modified as so:
## Test Cases
-Unit tests will have to be written to test the extension of TTL separately for contract code and contract instance.
+Unit tests will have to be written to test the extension of TTL separately for
+contract code and contract instance.
## Implementation
-The host functions and other changes need to be implemented in [rs-soroban-env](https://github.com/stellar/rs-soroban-env). Here is a [draft PR](https://github.com/stellar/rs-soroban-env/pull/1355) with the env changes. There will be a corresponding change to [soroban-sdk](https://github.com/stellar/rs-soroban-sdk) repo as well.
+The host functions and other changes need to be implemented in
+[rs-soroban-env](https://github.com/stellar/rs-soroban-env). Here is a
+[draft PR](https://github.com/stellar/rs-soroban-env/pull/1355) with the env
+changes. There will be a corresponding change to
+[soroban-sdk](https://github.com/stellar/rs-soroban-sdk) repo as well.
diff --git a/core/cap-0054.md b/core/cap-0054.md
index 9c5c8c76a..ec484d617 100644
--- a/core/cap-0054.md
+++ b/core/cap-0054.md
@@ -15,7 +15,9 @@ Protocol version: 21
## Simple Summary
-Lower total costs by refining the Soroban cost model used for VM instantiation into multiple separate and more-accurate costs. Also prepare for subsequent work lowering certain of these separate costs.
+Lower total costs by refining the Soroban cost model used for VM instantiation
+into multiple separate and more-accurate costs. Also prepare for subsequent
+work lowering certain of these separate costs.
## Working Group
@@ -23,31 +25,46 @@ As specified in the Preamble.
## Motivation
-To lower the CPU cost charged for each contract invocation transaction thereby admitting more such transactions per ledger and increasing total throughput.
+To lower the CPU cost charged for each contract invocation transaction thereby
+admitting more such transactions per ledger and increasing total throughput.
### Goals Alignment
-This change is aligned with the goal of lowering the cost and increasing the scale of the network.
+This change is aligned with the goal of lowering the cost and increasing the
+scale of the network.
## Abstract
-As of protocol 20 the model CPU cost (which translates to fees) charged for most contract executions is overwhelmingly dominated by the "VM instantiation" cost model.
+As of protocol 20 the model CPU cost (which translates to fees) charged for
+most contract executions is overwhelmingly dominated by the "VM instantiation"
+cost model.
-This cost model takes the byte-size of a WASM contract as input and outputs a pessimistic cost based on worst-case assumptions about the possible meaning of each byte in the contract code.
+This cost model takes the byte-size of a WASM contract as input and outputs a
+pessimistic cost based on worst-case assumptions about the possible meaning of
+each byte in the contract code.
-A more refined model will enable a tigher bound on costs, essentially charging something closer to the "real" cost rather than the pessimistic assumption.
+A more refined model will enable a tighter bound on costs, essentially charging
+something closer to the "real" cost rather than the pessimistic assumption.
## Specification
There are three parts to this work:
- 1. On contract upload, the initial contract parse and validation pass will initially use the old cost model, but the host will then analyze the contract and extract refined cost-input values (i.e. it will count the number of functions, imports, exports, data segment sizes and so forth).
- 2. This new refined cost-input information will be saved into the ledger along with the uploaded WASM bytecode, so that _subsequent_ instantiations can use a refined cost model.
- 3. When instantiating a contract with saved refined cost inputs, the refined cost model will be charged. This will also allow the cost model to properly _reflect_ changes in subsequent CAPs that lower the actual amount of work done during VM instantiation.
+1. On contract upload, the initial contract parse and validation pass will
+ initially use the old cost model, but the host will then analyze the
+ contract and extract refined cost-input values (i.e. it will count the
+ number of functions, imports, exports, data segment sizes and so forth).
+2. This new refined cost-input information will be saved into the ledger along
+ with the uploaded WASM bytecode, so that _subsequent_ instantiations can use
+ a refined cost model.
+3. When instantiating a contract with saved refined cost inputs, the refined
+ cost model will be charged. This will also allow the cost model to properly
+ _reflect_ changes in subsequent CAPs that lower the actual amount of work
+ done during VM instantiation.
### XDR changes
-~~~
+```
diff --git a/Stellar-contract-config-setting.x b/Stellar-contract-config-setting.x
index 6b50747..d066029 100644
--- a/Stellar-contract-config-setting.x
@@ -101,7 +118,7 @@ index 6b50747..d066029 100644
+ // Cost of instantiating a known number of memory pages.
+ InstantiateWasmMemoryPages = 42
};
-
+
struct ContractCostParamEntry {
diff --git a/Stellar-ledger-entries.x b/Stellar-ledger-entries.x
index 8a8784e..ff50201 100644
@@ -110,7 +127,7 @@ index 8a8784e..ff50201 100644
@@ -508,8 +508,31 @@ struct ContractDataEntry {
SCVal val;
};
-
+
+struct ContractCodeCostInputs {
+ uint32 nInstructions;
+ uint32 nFunctions;
@@ -137,59 +154,112 @@ index 8a8784e..ff50201 100644
+ ContractCodeCostInputs costInputs;
+ } v1;
+ } ext;
-
+
Hash hash;
opaque code<>;
-~~~
+```
### Semantics
The change consists of two logical changes:
- 1. New content stored in `ContractCodeEntry` ledger entries. These are arranged in a new struct `ContractCodeCostInputs` which is added at the existing `ExtensionPoint` of `ContractCodeEntry`, and encode counts of various aspects of the parsed Wasm body of the contract that we intend to feed into subsequent cost models when instantiating the contract.
- 2. New cost types added to the enum `ContractCostType`. There are two new cost types for each field in the `ContractCodeCostInputs`, one for the cost of parsing a module and one for the cost of instantiating an already-parsed module.
+1. New content stored in `ContractCodeEntry` ledger entries. These are arranged
+ in a new struct `ContractCodeCostInputs` which is added at the existing
+ `ExtensionPoint` of `ContractCodeEntry`, and encode counts of various
+ aspects of the parsed Wasm body of the contract that we intend to feed into
+ subsequent cost models when instantiating the contract.
+2. New cost types added to the enum `ContractCostType`. There are two new cost
+ types for each field in the `ContractCodeCostInputs`, one for the cost of
+ parsing a module and one for the cost of instantiating an already-parsed
+ module.
The way these are intended to be used is as follows:
- - When a new contract is uploaded, it is initially parsed with the old `VmInstantiation` cost model using the contract byte size, in order to check the contract's validity. This already happens today.
- - Next, new code supporting this CAP performs an additional pass is made over the Wasm module extracting numbers for the new `ContractCodeCostInputs`, which is then stored in the new `ContractCodeEntry` ledger entry.
- - When a contract is instantiated, the host checks the `ext` field:
- - If the contract has `ContractCodeCostInputs` then new code supporting this CAP charges the instantiation each of the new `ContractCostType`s using the corresponding `ContractCodeCostInputs` as inputs.
- - Otherwise the instantiation is charged the old `VmInstantiation` cost model using the contract byte size, as happens today.
-
-Additionally, to enable gradual upgrades to existing contracts, the semantics of uploading a contract that _already exists_ are altered:
-
- - Before this change, uploading an existing contract has no effect.
- - With this change, uploading an existing contract will check to see if the existing contract has `ContractCodeInputs`, and if not will add them and write the contract back to storage with the new inputs. Since the Wasm bytecode of the contract does not change (its identity is actually the hash of its bytecode) there is no risk to allowing this operation: it does not change the contract's meaning, only how it is charged during instantiation.
+- When a new contract is uploaded, it is initially parsed with the old
+ `VmInstantiation` cost model using the contract byte size, in order to check
+ the contract's validity. This already happens today.
+- Next, new code supporting this CAP performs an additional pass over
+ the Wasm module extracting numbers for the new `ContractCodeCostInputs`,
+ which is then stored in the new `ContractCodeEntry` ledger entry.
+- When a contract is instantiated, the host checks the `ext` field:
+ - If the contract has `ContractCodeCostInputs` then new code supporting this
+ CAP charges the instantiation each of the new `ContractCostType`s using the
+ corresponding `ContractCodeCostInputs` as inputs.
+ - Otherwise the instantiation is charged the old `VmInstantiation` cost model
+ using the contract byte size, as happens today.
+
+Additionally, to enable gradual upgrades to existing contracts, the semantics
+of uploading a contract that _already exists_ are altered:
+
+- Before this change, uploading an existing contract has no effect.
+- With this change, uploading an existing contract will check to see if the
+  existing contract has `ContractCodeCostInputs`, and if not will add them and
+ write the contract back to storage with the new inputs. Since the Wasm
+ bytecode of the contract does not change (its identity is actually the hash
+ of its bytecode) there is no risk to allowing this operation: it does not
+ change the contract's meaning, only how it is charged during instantiation.
## Design Rationale
-Tightening the cost model to a closer approximation of reality requires two things:
+Tightening the cost model to a closer approximation of reality requires two
+things:
- - Having multiple inputs that more precisely characterize the content of the Wasm module, rather than just its byte size.
- - Having multiple cost models, one for each such input, that more precisely characterize the cost of each type of work that a Wasm module might incur depending on its contents.
+- Having multiple inputs that more precisely characterize the content of the
+ Wasm module, rather than just its byte size.
+- Having multiple cost models, one for each such input, that more precisely
+ characterize the cost of each type of work that a Wasm module might incur
+ depending on its contents.
-Additionally, while currently the `VmInstantiation` cost type charges the cost of parsing, validating and instantiating a given Wasm module all together, we anticipate (and have already implemented) subsequent CAPs will support splitting the parsing and validation stages off of the instantiation stage, in order to support caching modules. We therefore split all the cost types introduced in this CAP in two, one for the parsing and validation stage and one for the instantiation stage.
+Additionally, while currently the `VmInstantiation` cost type charges the cost
+of parsing, validating and instantiating a given Wasm module all together, we
+anticipate (and have already implemented) subsequent CAPs will support
+splitting the parsing and validation stages off of the instantiation stage, in
+order to support caching modules. We therefore split all the cost types
+introduced in this CAP in two, one for the parsing and validation stage and one
+for the instantiation stage.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-The change is broadly backward compatible (new software can continue to process old data).
+The change is broadly backward compatible (new software can continue to process
+old data).
-The change adds new fields and new enumeration values, so it is not forward compatible (old software must be upgraded to accept the new data).
+The change adds new fields and new enumeration values, so it is not forward
+compatible (old software must be upgraded to accept the new data).
-There is a minor risk that by adding the approximately 40 bytes of new data to `ContractCode` ledger entries, an existing entry that fits within the network limit `CONFIG_SETTING_CONTRACT_MAX_SIZE_BYTES` will no longer fit (for example if the contract was already almost-exactly the size of the limit). At the time of writing there are no contracts on the public Stellar network that are within that range of the current network limits, and if such a contract occurs in practice the network limit can always be raised subsequently.
+There is a minor risk that by adding the approximately 40 bytes of new data to
+`ContractCode` ledger entries, an existing entry that fits within the network
+limit `CONFIG_SETTING_CONTRACT_MAX_SIZE_BYTES` will no longer fit (for example
+if the contract was already almost-exactly the size of the limit). At the time
+of writing there are no contracts on the public Stellar network that are within
+that range of the current network limits, and if such a contract occurs in
+practice the network limit can always be raised subsequently.
### Resource Utilization
-The change will add a small additional amount of storage on each contract ledger entry, as well as incurring a small additional amount of work to characterize the contract's code once, on upload (roughly double the CPU cost for an upload, due to parsing the code twice). While we could attempt to minimize this by more invasive changes to wasmi, it seems likely that the cost of an upload is dominated by its storage costs, and in any case uploads are expected to be much less frequent than invocations.
+The change will add a small additional amount of storage on each contract
+ledger entry, as well as incurring a small additional amount of work to
+characterize the contract's code once, on upload (roughly double the CPU cost
+for an upload, due to parsing the code twice). While we could attempt to
+minimize this by more invasive changes to wasmi, it seems likely that the cost
+of an upload is dominated by its storage costs, and in any case uploads are
+expected to be much less frequent than invocations.
-In general this change is aimed at reducing unnecessary costs, so should provide room to add more transactions to a ledger, which will increase overall resource utilization (intentionally).
+In general this change is aimed at reducing unnecessary costs, so should
+provide room to add more transactions to a ledger, which will increase overall
+resource utilization (intentionally).
## Security Concerns
-The main security risk is that the new cost model might undercount something, allowing malicious contracts to DoS validators by submitting expensive-to-instantiate contracts that the validators incorrectly assume are cheap-to-instantiate. We have attempted to minimize this in the implementation (eg. by prohibiting certain types of input that never occur in normal benign contracts) and to some extent this risk exists in today's coarse `VmInstantiation` cost model too, the risk is just elevated the tighter we make the cost model.
+The main security risk is that the new cost model might undercount something,
+allowing malicious contracts to DoS validators by submitting
+expensive-to-instantiate contracts that the validators incorrectly assume are
+cheap-to-instantiate. We have attempted to minimize this in the implementation
+(e.g. by prohibiting certain types of input that never occur in normal benign
+contracts) and to some extent this risk exists in today's coarse
+`VmInstantiation` cost model too, the risk is just elevated the tighter we make
+the cost model.
## Test Cases
@@ -197,4 +267,5 @@ TBD
## Implementation
-A preliminary implementation is [underway in the soroban-env-host repository](https://github.com/stellar/rs-soroban-env/pull/1359)
+A preliminary implementation is
+[underway in the soroban-env-host repository](https://github.com/stellar/rs-soroban-env/pull/1359)
diff --git a/core/cap-0055.md b/core/cap-0055.md
index ba3e7fbe0..0ff40bf88 100644
--- a/core/cap-0055.md
+++ b/core/cap-0055.md
@@ -13,7 +13,8 @@ Protocol version: 21
## Simple Summary
-Lower total costs by linking fewer host functions during VM instantiation in Soroban.
+Lower total costs by linking fewer host functions during VM instantiation in
+Soroban.
## Working Group
@@ -21,23 +22,38 @@ As specified in the Preamble.
## Motivation
-To lower the CPU cost charged for each contract invocation transaction thereby admitting more such transactions per ledger and increasing total throughput.
+To lower the CPU cost charged for each contract invocation transaction thereby
+admitting more such transactions per ledger and increasing total throughput.
### Goals Alignment
-This change is aligned with the goal of lowering the cost and increasing the scale of the network.
+This change is aligned with the goal of lowering the cost and increasing the
+scale of the network.
## Abstract
-[CAP-0054](./cap-0054.md) introduces a refined cost model for VM instantiation. This in turn enables a possible optimization to the VM instantiation process: linking fewer host functions.
+[CAP-0054](./cap-0054.md) introduces a refined cost model for VM instantiation.
+This in turn enables a possible optimization to the VM instantiation process:
+linking fewer host functions.
## Specification
-The VM instantiation process adds a set of host functions into the VM's "linker" such that they are available for importing into a contract. There are over 100 host functions that can be added and at present they are all added for every contract. During profiling it has been observed that the set of host functions added to the linker is actually a source of significant cost.
+The VM instantiation process adds a set of host functions into the VM's
+"linker" such that they are available for importing into a contract. There are
+over 100 host functions that can be added and at present they are all added for
+every contract. During profiling it has been observed that the set of host
+functions added to the linker is actually a source of significant cost.
-This CAP therefore proposes limiting the set of host functions added to only those that are actually mentioned as imports in the contract. This set can easily be observed before the loop that adds functions to the linker, and doing so brings an immediate performance improvement.
+This CAP therefore proposes limiting the set of host functions added to only
+those that are actually mentioned as imports in the contract. This set can
+easily be observed before the loop that adds functions to the linker, and doing
+so brings an immediate performance improvement.
-This optimization is small and simple and without drawbacks. However, it depends on CAP-0054 in order to be reflected in a refined cost model, and therefore to influence the actual throughput of the system in terms of the _cost model_. So it is effectively a protocol change (or at least must co-occur with a protocol change).
+This optimization is small and simple and without drawbacks. However, it
+depends on CAP-0054 in order to be reflected in a refined cost model, and
+therefore to influence the actual throughput of the system in terms of the
+_cost model_. So it is effectively a protocol change (or at least must co-occur
+with a protocol change).
### XDR changes
@@ -45,28 +61,42 @@ There are no XDR changes beyond those proposed in CAP-0054.
### Semantics
- - During parsing the host records which host functions are imported.
- - Only those functions that are imported are added to the linker.
- - As a result, less work is done in the host.
- - The performance benefit of doing less real work may or may not result in higher transaction throughput:
- - If the contract has a new refined cost model (as defined in CAP-0054), the new cost model will have terms reflecting the real number of imports, so the contract will incur lower model resources, allowing more transactions to fit in the same model resource budget for the enclosing transaction set.
- - Otherwise the old cost model is used, and the performance benefit is limited to real costs rather than those in the resource model: the transaction still completes more quickly than before, but no additional transactions will be admitted to the transaction set because the model does not yet reflect the lower real costs.
+- During parsing the host records which host functions are imported.
+- Only those functions that are imported are added to the linker.
+- As a result, less work is done in the host.
+- The performance benefit of doing less real work may or may not result in
+ higher transaction throughput:
+ - If the contract has a new refined cost model (as defined in CAP-0054), the
+ new cost model will have terms reflecting the real number of imports, so
+ the contract will incur lower model resources, allowing more transactions
+ to fit in the same model resource budget for the enclosing transaction set.
+ - Otherwise the old cost model is used, and the performance benefit is
+ limited to real costs rather than those in the resource model: the
+ transaction still completes more quickly than before, but no additional
+ transactions will be admitted to the transaction set because the model does
+ not yet reflect the lower real costs.
## Design Rationale
-This is a simple change to do less work, the rationale should be self-explanatory.
+This is a simple change to do less work, the rationale should be
+self-explanatory.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-The change is broadly backward compatible (new software can continue to process old data).
+The change is broadly backward compatible (new software can continue to process
+old data).
-The only difference is that the same contracts will, if run under the new cost model of CAP-0054, incur lower costs. The change will therefore only be meaningful to end-users if part of that protocol change. It will be useful to validator-operators in any case.
+The only difference is that the same contracts will, if run under the new cost
+model of CAP-0054, incur lower costs. The change will therefore only be
+meaningful to end-users if part of that protocol change. It will be useful to
+validator-operators in any case.
### Resource Utilization
-The change strictly lowers real resource usage, though it might not lower model resource usage until contracts are upgraded.
+The change strictly lowers real resource usage, though it might not lower model
+resource usage until contracts are upgraded.
## Security Concerns
@@ -78,4 +108,5 @@ TBD
## Implementation
-A preliminary implementation is [underway in the soroban-env-host repository](https://github.com/stellar/rs-soroban-env/pull/1359)
+A preliminary implementation is
+[underway in the soroban-env-host repository](https://github.com/stellar/rs-soroban-env/pull/1359)
diff --git a/core/cap-0056.md b/core/cap-0056.md
index 896e15337..a61feb03c 100644
--- a/core/cap-0056.md
+++ b/core/cap-0056.md
@@ -21,25 +21,44 @@ As specified in the Preamble.
## Motivation
-To lower the CPU cost charged for each contract invocation transaction thereby admitting more such transactions per ledger and increasing total throughput.
+To lower the CPU cost charged for each contract invocation transaction thereby
+admitting more such transactions per ledger and increasing total throughput.
### Goals Alignment
-This change is aligned with the goal of lowering the cost and increasing the scale of the network.
+This change is aligned with the goal of lowering the cost and increasing the
+scale of the network.
## Abstract
-Soroban transactions may invoke the same contract more than once in a transaction, for example by making multiple calls to different methods on the same token contract. Currently each such cross-contract call re-parses the called contract as part of instantiating it. This CAP proposes adding a module cache to the Soroban host, such that contracts are only parsed once per transaction.
+Soroban transactions may invoke the same contract more than once in a
+transaction, for example by making multiple calls to different methods on the
+same token contract. Currently each such cross-contract call re-parses the
+called contract as part of instantiating it. This CAP proposes adding a module
+cache to the Soroban host, such that contracts are only parsed once per
+transaction.
## Specification
-Each Soroban transaction typically consists of a tree of contract invocations. Each contract in that tree of invocations must be parsed and instantiated at least once, but if the tree calls the same contract more than once within a transaction, there is an easy opportunity for a performance improvement by _not_ parsing the contract repeatedly for each call, but reusing a cached copy.
-
-To do this requires both caching parsed contracts, as well as separately charging for parsing (which can be done one per module per transaction) and later phases of instantiation (which must be repeated per invocation). This separation existed in the previous (unused) distinction between the `VmInstantiation` and `VmCachedInstantiation` cost types, and is preserved by the separation of new cost types in CAP-0054. This CAP can be accepted with or without CAP-0054, but the split in cost-types in CAP-0054 between parsing and instantiation is motivated by this CAP.
+Each Soroban transaction typically consists of a tree of contract invocations.
+Each contract in that tree of invocations must be parsed and instantiated at
+least once, but if the tree calls the same contract more than once within a
+transaction, there is an easy opportunity for a performance improvement by
+_not_ parsing the contract repeatedly for each call, but reusing a cached copy.
+
+To do this requires both caching parsed contracts, as well as separately
+charging for parsing (which can be done once per module per transaction) and
+later phases of instantiation (which must be repeated per invocation). This
+separation existed in the previous (unused) distinction between the
+`VmInstantiation` and `VmCachedInstantiation` cost types, and is preserved by
+the separation of new cost types in CAP-0054. This CAP can be accepted with or
+without CAP-0054, but the split in cost-types in CAP-0054 between parsing and
+instantiation is motivated by this CAP.
### XDR changes
-There are no XDR changes beyond those proposed in CAP-0054, which as mentioned above are actually optional too. This CAP works with or without it.
+There are no XDR changes beyond those proposed in CAP-0054, which as mentioned
+above are actually optional too. This CAP works with or without it.
### Semantics
@@ -48,52 +67,97 @@ There are no XDR changes beyond those proposed in CAP-0054, which as mentioned a
This section describes the change to observable behaviour.
- When a Soroban transaction is constructed, before it begins executing:
- - For each `ContractCode` ledger entry mentioned in the read footprint of the transaction:
- - If CAP-0054 is accepted **and** the ledger entry contains the new CAP-0054 refined cost model input types:
- - The transaction is charged the new refined `ParseWasm*` module-parsing cost models with the new refined cost model inputs
- - Else:
- - The transaction is charged the old `VmInstantiation` cost type, which will have been recalibrated by this change to only cover the cost of parsing the `ContractCode`'s Wasm module, not fully instantiating it.
-- When a Soroban transaction performs an _invocation_ on some contract implemented by some Wasm module:
- - If CAP-0054 is accepted **and** the ledger entry contains the new CAP-0054 refined cost model input types:
- - The transaction is charged the new refined `InstantiateWasm*` module-instantiating cost models with the new refined cost model inputs
+ - For each `ContractCode` ledger entry mentioned in the read footprint of the
+ transaction:
+ - If CAP-0054 is accepted **and** the ledger entry contains the new
+ CAP-0054 refined cost model input types:
+ - The transaction is charged the new refined `ParseWasm*` module-parsing
+ cost models with the new refined cost model inputs
- Else:
- - The transaction is charged the old `VmCachedInstantiation` cost type, which will have been recalibrated by this change to only cover the cost of instantiating an already-parsed module.
+ - The transaction is charged the old `VmInstantiation` cost type, which
+ will have been recalibrated by this change to only cover the cost of
+ parsing the `ContractCode`'s Wasm module, not fully instantiating it.
+- When a Soroban transaction performs an _invocation_ on some contract
+ implemented by some Wasm module:
+ - If CAP-0054 is accepted **and** the ledger entry contains the new CAP-0054
+ refined cost model input types:
+ - The transaction is charged the new refined `InstantiateWasm*`
+ module-instantiating cost models with the new refined cost model inputs
+ - Else:
+ - The transaction is charged the old `VmCachedInstantiation` cost type,
+ which will have been recalibrated by this change to only cover the cost
+ of instantiating an already-parsed module.
#### Informative
-This section explains the implementation. It is intended for illustration purposes, and other possible impementations that achieve the same observable normative behaviour are possible.
-
-- When a Soroban `Host` is constructed for a transaction, it contains a new module cache that will hold parsed Wasm modules.
-- The module cache is then, before any modules are executed, unconditionally populated with a parsed Wasm module for every contract mentioned in the transaction's read footprint.
- - When parsing a module for caching, it will be charged to either the cost model of the old `VmInstantiation` cost type, or the new refined cost models for the CAP-0054 `ParseWasm*` cost types.
-- As invocations require instantiation of parsed modules, pre-parsed modules will be extracted from the cache and instantiated.
- - When instantiating a cached module, it will be charged to either the cost modelof the old `VmCachedInstantiation` cost type, or the new refined cost models for the CAP-0054 `InstantiateWasm*` cost types.
+This section explains the implementation. It is intended for illustration
+purposes, and other possible implementations that achieve the same observable
+normative behaviour are possible.
+
+- When a Soroban `Host` is constructed for a transaction, it contains a new
+ module cache that will hold parsed Wasm modules.
+- The module cache is then, before any modules are executed, unconditionally
+ populated with a parsed Wasm module for every contract mentioned in the
+ transaction's read footprint.
+ - When parsing a module for caching, it will be charged to either the cost
+ model of the old `VmInstantiation` cost type, or the new refined cost
+ models for the CAP-0054 `ParseWasm*` cost types.
+- As invocations require instantiation of parsed modules, pre-parsed modules
+ will be extracted from the cache and instantiated.
+ - When instantiating a cached module, it will be charged to either the cost
+  model of the old `VmCachedInstantiation` cost type, or the new refined cost
+ models for the CAP-0054 `InstantiateWasm*` cost types.
## Design Rationale
For the most part the design is straightforward: add a cache and use it.
-The only subtle rationale is the need for eager instantiation. Due to particularities of the way the Wasm VM in Soroban works -- the VM's "engine" is locked during execution -- the cache must be fully populated before any execution begins; caching cannot happen "on demand" as a contract runs.
+The only subtle rationale is the need for eager instantiation. Due to
+particularities of the way the Wasm VM in Soroban works -- the VM's "engine" is
+locked during execution -- the cache must be fully populated before any
+execution begins; caching cannot happen "on demand" as a contract runs.
-As a result, the set of cached contracts will depend solely on the read footprint, not any further deviation from the footprint in the contract's execution: every contract present in the read footprint of a transaction will be parsed and cached eagerly when initializing the transaction.
+As a result, the set of cached contracts will depend solely on the read
+footprint, not any further deviation from the footprint in the contract's
+execution: every contract present in the read footprint of a transaction will
+be parsed and cached eagerly when initializing the transaction.
-Eager cache population is also the most likely structure to be compatible with future extensions of this work to include caching modules across transactions, or even across ledgers. However, such future work requires more complex fee and transaction-queueing logic, and is out of scope for the current proposed change.
+Eager cache population is also the most likely structure to be compatible with
+future extensions of this work to include caching modules across transactions,
+or even across ledgers. However, such future work requires more complex fee and
+transaction-queueing logic, and is out of scope for the current proposed
+change.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-The change is broadly backward compatible (new software can continue to process old data).
+The change is broadly backward compatible (new software can continue to process
+old data).
-The change will charge nonzero costs to a cost type that, before the change, sees only zero costs. But users and operators should not be relying on those costs to be zero.
+The change will charge nonzero costs to a cost type that, before the change,
+sees only zero costs. But users and operators should not be relying on those
+costs to be zero.
### Resource Utilization
-While it is possible to construct a transaction that is charged more with the eager parsing in this CAP than it would with lazy parsing on today's network, such a transaction is quite contrived: it would require contract A to call contract B during simulation (when the footprint is recorded and initial fee is estimated) and then _change its decision_ when executing on-chain for real and _not_ call contract B, typically just moments after the simulation that recorded its intent to call B.
+While it is possible to construct a transaction that is charged more with the
+eager parsing in this CAP than it would with lazy parsing on today's network,
+such a transaction is quite contrived: it would require contract A to call
+contract B during simulation (when the footprint is recorded and initial fee is
+estimated) and then _change its decision_ when executing on-chain for real and
+_not_ call contract B, typically just moments after the simulation that
+recorded its intent to call B.
-This type of transaction seems sufficiently unlikely to occur that we think it can be discounted, especially given that the only penalty for it occurring would not be a transaction failure but merely a slightly higher-than-necessary fee being charged: the fee returned from simulation, rather than a lower one that (somehow) anticipated the transaction's changed decision.
+This type of transaction seems sufficiently unlikely to occur that we think it
+can be discounted, especially given that the only penalty for it occurring
+would not be a transaction failure but merely a slightly higher-than-necessary
+fee being charged: the fee returned from simulation, rather than a lower one
+that (somehow) anticipated the transaction's changed decision.
-We expect in practice that all transactions will be charged either the same as they are before the change (if there are no cache hits) or significantly less (if there are cache hits).
+We expect in practice that all transactions will be charged either the same as
+they are before the change (if there are no cache hits) or significantly less
+(if there are cache hits).
## Security Concerns
@@ -105,4 +169,5 @@ TBD
## Implementation
-A preliminary implementation is [underway in the soroban-env-host repository](https://github.com/stellar/rs-soroban-env/pull/1359)
+A preliminary implementation is
+[underway in the soroban-env-host repository](https://github.com/stellar/rs-soroban-env/pull/1359)
diff --git a/core/cap-0057.md b/core/cap-0057.md
index 83ef2405b..512fb3aaf 100644
--- a/core/cap-0057.md
+++ b/core/cap-0057.md
@@ -13,7 +13,8 @@ Protocol version: 23
## Simple Summary
-This proposal allows the network to evict `PERSISTENT` entries, i.e., delete archived `PERSISTENT` entries from validators.
+This proposal allows the network to evict `PERSISTENT` entries, i.e., delete
+archived `PERSISTENT` entries from validators.
## Working Group
@@ -21,29 +22,40 @@ As specified in the Preamble.
## Motivation
-To lower the storage requirements of validators and decrease the growth of History Archives.
+To lower the storage requirements of validators and decrease the growth of
+History Archives.
### Goals Alignment
-This change is aligned with the goal of lowering the cost and increasing the scale of the network.
+This change is aligned with the goal of lowering the cost and increasing the
+scale of the network.
## Abstract
-Whenever a `PERSISTENT` entry is evicted or deleted by a transaction (as in deleted as part of TX execution), it will be removed from
-the “Live State BucketList” (today called the BucketList) and added to the “Archived State Tree” (AST). The AST is made up of a collection
-of immutable Merkle trees stored on top of the BucketList, called “Archival Snapshots”, plus a single mutable store called the
-“Hot Archive”.
-
-At any given point, all validators store the Hot Archive on disk. Whenever a `PERSISTENT` entry is evicted, it is added to the
-Hot Archive. Eventually, the Hot Archive becomes full. At this point, the Hot Archive is “snapshotted” and converted into an immutable
-Archival Snapshot. Validators retain only the Merkle root of the newly created Archival Snapshot and delete the rest of the snapshot.
-The validators then initialize a new, empty Hot Archive and repeat the process.
-
-While validators only store the Merkle root of each Archival Snapshot, the complete Archival Snapshots are persisted in the History
-Archive. RPC nodes may locally store as few or as many of these snapshots as the operator desires, initialized directly via History
-Archive files (RPC providers may also shard snapshots across multiple nodes such that no one RPC node is required to store all
-snapshots). During preflight, RPC will use these local snapshots to attach Merkle style proofs for any archived entry encountered during preflight. These proofs can then be submitted via the `RestoreFootprintOp` to allow the entries to be used again and become part of the
-Live State BucketList.
+Whenever a `PERSISTENT` entry is evicted or deleted by a transaction (as in
+deleted as part of TX execution), it will be removed from the “Live State
+BucketList” (today called the BucketList) and added to the “Archived State
+Tree” (AST). The AST is made up of a collection of immutable Merkle trees
+stored on top of the BucketList, called “Archival Snapshots”, plus a single
+mutable store called the “Hot Archive”.
+
+At any given point, all validators store the Hot Archive on disk. Whenever a
+`PERSISTENT` entry is evicted, it is added to the Hot Archive. Eventually, the
+Hot Archive becomes full. At this point, the Hot Archive is “snapshotted” and
+converted into an immutable Archival Snapshot. Validators retain only the
+Merkle root of the newly created Archival Snapshot and delete the rest of the
+snapshot. The validators then initialize a new, empty Hot Archive and repeat
+the process.
+
+While validators only store the Merkle root of each Archival Snapshot, the
+complete Archival Snapshots are persisted in the History Archive. RPC nodes may
+locally store as few or as many of these snapshots as the operator desires,
+initialized directly via History Archive files (RPC providers may also shard
+snapshots across multiple nodes such that no one RPC node is required to store
+all snapshots). During preflight, RPC will use these local snapshots to attach
+Merkle style proofs for any archived entry encountered during preflight. These
+proofs can then be submitted via the `RestoreFootprintOp` to allow the entries
+to be used again and become part of the Live State BucketList.
## Specification
@@ -325,7 +337,7 @@ struct SorobanTransactionData
SorobanResources resources;
// Amount of the transaction `fee` allocated to the Soroban resource fees.
- // The fraction of `resourceFee` corresponding to `resources` specified
+ // The fraction of `resourceFee` corresponding to `resources` specified
// above is *not* refundable (i.e. fees for instructions, ledger I/O), as
// well as fees for the transaction size.
// The remaining part of the fee is refundable and the charged value is
@@ -394,27 +406,30 @@ case LEDGER_ENTRY_RESTORE:
#### Archival State Tree (AST)
-The Archive State Tree (AST) is a collection of immutable Merkle trees whose leaves
-are all archived entries and some `PERSISTENT` entry keys explicitly deleted via transaction
-execution. The AST is a collection of subtrees indexed `AST[0]`, `AST[1]`, …, `AST[N]`.
-The AST index number is called the Archival Epoch. We define the current
-Archival Epoch as N + 1, where N is the index of the most recently completed AST
-subtree.
+The Archive State Tree (AST) is a collection of immutable Merkle trees whose
+leaves are all archived entries and some `PERSISTENT` entry keys explicitly
+deleted via transaction execution. The AST is a collection of subtrees indexed
+`AST[0]`, `AST[1]`, …, `AST[N]`. The AST index number is called the Archival
+Epoch. We define the current Archival Epoch as N + 1, where N is the index of
+the most recently completed AST subtree.
For some Archival Epoch `k`, `AST[k]` is as follows:
-`AST[k]` is a sorted, balanced, binary Merkle tree. When `AST[k]` is initialized, it
-contains two dummy boundary leaves of type `COLD_ARCHIVE_BOUNDARY_LEAF`, one lower bound leaf and
-one upper bound leaf (these boundary leafs are required for proofs-of-nonexistence).
+`AST[k]` is a sorted, balanced, binary Merkle tree. When `AST[k]` is
+initialized, it contains two dummy boundary leaves of type
+`COLD_ARCHIVE_BOUNDARY_LEAF`, one lower bound leaf and one upper bound leaf
+(these boundary leafs are required for proofs-of-nonexistence).
In addition to these boundary leafs, `AST[k]` contains a leaf for
-1. Every `PERSISTENT` entry evicted (but not restored) during archival epoch `k`. These entries
-are stored in `AST[k]` as type `COLD_ARCHIVE_ARCHIVED_LEAF`.
+1. Every `PERSISTENT` entry evicted (but not restored) during archival epoch
+ `k`. These entries are stored in `AST[k]` as type
+ `COLD_ARCHIVE_ARCHIVED_LEAF`.
-2. Every `PERSISTENT` entry explicitly deleted via transaction execution during epoch `k`,
-iff an ARCHIVED entry with given entry's key exists in some subtree AST[i] where i < k.
-These keys are stored in `AST[k]` as type `COLD_ARCHIVE_DELETED_LEAF`.
+2. Every `PERSISTENT` entry explicitly deleted via transaction execution during
+   epoch `k`, iff an ARCHIVED entry with the given entry's key exists in some
+ subtree AST[i] where i < k. These keys are stored in `AST[k]` as type
+ `COLD_ARCHIVE_DELETED_LEAF`.
Leaf nodes are sorted as follows:
@@ -446,10 +461,11 @@ cmp(lhs: ColdArchiveBucketEntry, rhs: ColdArchiveBucketEntry): // lhs < rhs
return LedgerKey(lhs) < LedgerKey(rhs) // pre-existing compare function
```
-Internal nodes, including the root node, are all of type `COLD_ARCHIVE_HASH` and constructed as follows:
+Internal nodes, including the root node, are all of type `COLD_ARCHIVE_HASH`
+and constructed as follows:
-Let us define a node's `level`, where leaf nodes have `level == 0`. For each leaf node with
-`index == i`, there is the parent `COLD_ARCHIVE_HASH` node:
+Let us define a node's `level`, where leaf nodes have `level == 0`. For each
+leaf node with `index == i`, there is the parent `COLD_ARCHIVE_HASH` node:
```
COLD_ARCHIVE_HASH {
@@ -459,9 +475,10 @@ COLD_ARCHIVE_HASH {
}
```
-Note that the entire leaf node of type `ColdArchiveBucketEntry` is hashed, not just the underlying
-`LedgerEntry` or `LedgerKey`. We must hash the entire `ColdArchiveBucketEntry` in order to verify
-the index of the entry being proved (a requirement for proofs of nonexistence).
+Note that the entire leaf node of type `ColdArchiveBucketEntry` is hashed, not
+just the underlying `LedgerEntry` or `LedgerKey`. We must hash the entire
+`ColdArchiveBucketEntry` in order to verify the index of the entry being proved
+(a requirement for proofs of nonexistence).
All internal nodes with `level > 1` are constructed as follows:
@@ -483,11 +500,14 @@ InternalNode = COLD_ARCHIVE_HASH {
#### Proof Structure
-In order to protect against "Double Restore" attacks (see Security Concerns), it is necessary to prove that the version of
-the `LedgerEntry` being restored is the newest version that exists in the entire AST. This is accomplished by proving that the
-entry exists in a given subtree, and proving that the key does not exist in every newer subtree.
+In order to protect against "Double Restore" attacks (see Security Concerns),
+it is necessary to prove that the version of the `LedgerEntry` being restored
+is the newest version that exists in the entire AST. This is accomplished by
+proving that the entry exists in a given subtree, and proving that the key does
+not exist in every newer subtree.
-A restoration proof for archived entry `e` that was archived in epoch `k` is as follows:
+A restoration proof for archived entry `e` that was archived in epoch `k` is as
+follows:
```
generateArchivalProof(e: ColdArchiveArchivedLeaf, k: Epoch):
@@ -509,11 +529,12 @@ generateArchivalProof(e: ColdArchiveArchivedLeaf, k: Epoch):
##### Proof of existence
-A proof of existence that `e` exists in some subtree is a Merkle style proof of inclusion. This includes
-the path from the root node of the tree down to the leaf node being proved and all sibling nodes along
-the path, excluding the root node. Since the root node is maintained by the validator validating the proof,
-it can be omitted. Additionally, if multiple entries are being proved, each `COLD_ARCHIVE_HASH` on the proof path
-must only be included once.
+A proof of existence that `e` exists in some subtree is a Merkle style proof of
+inclusion. This includes the path from the root node of the tree down to the
+leaf node being proved and all sibling nodes along the path, excluding the root
+node. Since the root node is maintained by the validator validating the proof,
+it can be omitted. Additionally, if multiple entries are being proved, each
+`COLD_ARCHIVE_HASH` on the proof path must only be included once.
```
// Merkle subtree is represented by the map [level][node_index]
@@ -547,9 +568,9 @@ generateProofOfExistence(subtree=AST[k], e: ColdArchiveBucketEntry, proof: optio
return proof
```
-Given a proof `p` and the root of the subtree `r`,
-a validator can verify the proof by recomputing the neighbor hashes along
-the path and checking the result against the saved root hash as follows:
+Given a proof `p` and the root of the subtree `r`, a validator can verify the
+proof by recomputing the neighbor hashes along the path and checking the result
+against the saved root hash as follows:
```
verifyProofOfExistence(p: ArchivalProof, rootHash: Hash):
@@ -586,8 +607,8 @@ subtree is a proof of existence for two keys `lowKey` and `highKey` such that:
1. `lowKey < k`
2. `highKey > k`
-3. `lowKey` and `highKey` are direct neighbors in the subtree,
- i.e. `subtree[highKey].index - subtree[lowKey].index == 1`
+3. `lowKey` and `highKey` are direct neighbors in the subtree, i.e.
+ `subtree[highKey].index - subtree[lowKey].index == 1`
The proof is generated as follows:
@@ -618,9 +639,9 @@ generateProofOfNonexistence(subtree=AST[n], k: LedgerKey, proof: optional i.
+If a key previously existed and has been deleted, a proof of the deletion is
+required. If the entry was deleted in epoch i, a proof of existence for a
+DELETED node must be given for AST[i], and a proof of nonexistence for every
+subtree AST[k] where k > i.
This verification is as follows:
@@ -781,168 +808,234 @@ isRestoreValid(key: LedgerKey, proofs: map(uint32, ArchivalProof), lastEpoch: ui
#### Generating the AST
-Each validator maintains the "Live State BucketList" (currently called the BucketList). This stores all live ledger state,
-including entries that are archived, but have not yet been evicted. Additionally, validators will maintain the "Hot Archive",
-an additional BucketList containing recently archived entries. Validators will maintain a "Cold Archive". The Cold
-Archive contains the oldest archived entries stored on the validator. The Cold Archive is an in-progress AST subtree that is
-slowly constructed over time. Eventually, the AST subtree is completed. The validators retain just the root Merkle hash,
-construct and Archival Filter for the contents of the subtree and drop the now completed Cold Archive.
-
-In this way each validator will maintain the Merkle root of all AST subtrees, as well as an Archival Filter for each subtree.
-These roots and filters are stored as `LedgerEntry` of type `BLOB` in the Live State BucketList.
+Each validator maintains the "Live State BucketList" (currently called the
+BucketList). This stores all live ledger state, including entries that are
+archived, but have not yet been evicted. Additionally, validators will maintain
+the "Hot Archive", an additional BucketList containing recently archived
+entries. Validators will maintain a "Cold Archive". The Cold Archive contains
+the oldest archived entries stored on the validator. The Cold Archive is an
+in-progress AST subtree that is slowly constructed over time. Eventually, the
+AST subtree is completed. The validators retain just the root Merkle hash,
+construct an Archival Filter for the contents of the subtree and drop the now
+completed Cold Archive.
+
+In this way each validator will maintain the Merkle root of all AST subtrees,
+as well as an Archival Filter for each subtree. These roots and filters are
+stored as `LedgerEntry` of type `BLOB` in the Live State BucketList.
#### AST Generation Flow
-Consider the initial archival epoch when full State Archival is first enabled. Only the current Hot Archive exists
-(the in-progress subtree AST[0]). The Pending Cold Archive queue, current Cold Archive, and History Archive snapshots are
-all empty.
+Consider the initial archival epoch when full State Archival is first enabled.
+Only the current Hot Archive exists (the in-progress subtree AST[0]). The
+Pending Cold Archive queue, current Cold Archive, and History Archive snapshots
+are all empty.

-After some time, the Hot Archive `AST[0]` will become full and enter the Pending Cold Archive Queue.
-A new, empty Hot Archive is initialized for `AST[1]`. While in the Pending Cold Archive Queue, `AST[0]`
-will be converted into a single Cold Archive Bucket and prepare the Archival Filter. In order to give
-validators time to perform this merge, `AST[0]`
-must stay in the queue for a minimum of `numLedgersToInitSnapshot` ledgers. In this example, `AST[1]` also becomes full
-before `numLedgersToInitSnapshot` ledgers occur. Thus, `AST[1]` is also added to this queue, and `AST[2]` is initialized as
-the current Hot Archive.
+After some time, the Hot Archive `AST[0]` will become full and enter the
+Pending Cold Archive Queue. A new, empty Hot Archive is initialized for
+`AST[1]`. While in the Pending Cold Archive Queue, `AST[0]` will be converted
+into a single Cold Archive Bucket and prepare the Archival Filter. In order to
+give validators time to perform this merge, `AST[0]` must stay in the queue for
+a minimum of `numLedgersToInitSnapshot` ledgers. In this example, `AST[1]` also
+becomes full before `numLedgersToInitSnapshot` ledgers occur. Thus, `AST[1]` is
+also added to this queue, and `AST[2]` is initialized as the current Hot
+Archive.

-After `numLedgersToInitSnapshot` ledgers have passed, `AST[0]` is now eligible to become the current Cold Archive. On the
-ledger that this occurs, `AST[0]` is removed from the Pending Cold Archive Queue and initialized as the current Cold Archive.
-At this time, the Archival Filter is also persisted to the live BucketList.
-Simultaneously, the single merged Cold Archive Bucket for `AST[0]` is published to history as the canonical Archival Snapshot for
-epoch 0.
+After `numLedgersToInitSnapshot` ledgers have passed, `AST[0]` is now eligible
+to become the current Cold Archive. On the ledger that this occurs, `AST[0]` is
+removed from the Pending Cold Archive Queue and initialized as the current Cold
+Archive. At this time, the Archival Filter is also persisted to the live
+BucketList. Simultaneously, the single merged Cold Archive Bucket for `AST[0]`
+is published to history as the canonical Archival Snapshot for epoch 0.

-On a deterministic schedule, the root Merkle hash for `AST[0]` is generated over many ledgers. Note that during this time, even if
-another `numLedgersToInitSnapshot` ledgers pass, `AST[1]` is not eligible to leave the queue until `AST[0]` has generated a Merkle
-root and has been dropped by validators.
+On a deterministic schedule, the root Merkle hash for `AST[0]` is generated
+over many ledgers. Note that during this time, even if another
+`numLedgersToInitSnapshot` ledgers pass, `AST[1]` is not eligible to leave the
+queue until `AST[0]` has generated a Merkle root and has been dropped by
+validators.
-Eventually, the Merkle root for `AST[0]` is generated. As this time, the Merkle root and Archival Filter for `AST[0]` is written to the
-Live State BucketList and the current Cold Archive `AST[0]` is dropped from validators.
+Eventually, the Merkle root for `AST[0]` is generated. At this time, the Merkle
+root and Archival Filter for `AST[0]` are written to the Live State BucketList
+and the current Cold Archive `AST[0]` is dropped from validators.

-After `AST[0]` is dropped from validators, if at least `numLedgersToInitSnapshot` have passed since `AST[1]` joined the Pending Cold
-Archive Queue, `AST[1]` now becomes the current Cold Archive. The snapshot for `AST[1]` is published to the History Archives and the
-cycle repeats.
+After `AST[0]` is dropped from validators, if at least
+`numLedgersToInitSnapshot` have passed since `AST[1]` joined the Pending Cold
+Archive Queue, `AST[1]` now becomes the current Cold Archive. The snapshot for
+`AST[1]` is published to the History Archives and the cycle repeats.

#### Live State BucketList
-The Live State BucketList most closely resembles the current BucketList. It will contain all “live” state of the ledger, including
-Stellar classic entries, live Soroban entries, network config settings, etc. Additionally, it will include all AST information that is
-permanent and must always be persisted by validators (Merkle roots and Archival Filters). This is a persistent store that is never deleted.
-The Live State BucketList is published to the History Archive on every checkpoint ledger via the "history" category.
+The Live State BucketList most closely resembles the current BucketList. It
+will contain all “live” state of the ledger, including Stellar classic entries,
+live Soroban entries, network config settings, etc. Additionally, it will
+include all AST information that is permanent and must always be persisted by
+validators (Merkle roots and Archival Filters). This is a persistent store that
+is never deleted. The Live State BucketList is published to the History Archive
+on every checkpoint ledger via the "history" category.
#### Hot Archive
-The Hot Archive is a BucketList that stores recently evicted and deleted `PERSISTENT` entries and is `AST[currentArchivalEpoch]`.
-It contains `HotArchiveBucketEntry` type entries and is constructed as follows:
-
-1. Whenever a `PERSISTENT` entry is evicted, the entry is deleted from the Live State BucketList and added to the Hot Archive as a `HOT_ARCHIVE_ARCHIVED`
-entry. The corresponding `TTLEntry` is deleted and not stored in the Hot Archive.
-2. Whenever a `PERSISTENT` entry is deleted as part of transaction execution (not deleted via eviction event), the key is stored in the Hot
-Archive as a `HOT_ARCHIVE_DELETED` entry iff an ARCHIVED entry with given entry's key exists in some subtree AST[i] where i < k.
-3. If an archived entry is restored and the entry currently exists in the Hot Archive, the `HOT_ARCHIVE_ARCHIVED` previously stored in the Hot
-Archive is overwritten by a `HOT_ARCHIVE_LIVE` entry.
-4. If a deleted key is recreated and the deleted key currently exists in the Hot Archive, the `HOT_ARCHIVE_DELETED` previously stored in the
-Hot Archive is overwritten by a `HOT_ARCHIVE_LIVE` entry.
-
-For Bucket merges, the newest version of a given key is always taken. At the bottom level, `HOT_ARCHIVE_LIVE` entries are dropped.
-The `HOT_ARCHIVE_LIVE` state indicates that the given key currently exists in the Live BucketList. Thus, any Hot Archive reference
-is out of date and can be dropped.
-
-The current Hot Archive is published to the History Archive via the "history" category on every checkpoint ledger.
-
-Whenever the size of the Hot Archive exceeds `archivalSnapshotSize`, the given AST subtree becomes immutable and is added to the
-`Pending Cold Archive Queue`, A FIFO queue holding all full AST subtrees that are "waiting" to become the current Cold Archive.
-At this point a new, empty Hot Archive is initialized and the current Archival Epoch is incremented to begin constructing the next subtree.
+The Hot Archive is a BucketList that stores recently evicted and deleted
+`PERSISTENT` entries and is `AST[currentArchivalEpoch]`. It contains
+`HotArchiveBucketEntry` type entries and is constructed as follows:
+
+1. Whenever a `PERSISTENT` entry is evicted, the entry is deleted from the Live
+ State BucketList and added to the Hot Archive as a `HOT_ARCHIVE_ARCHIVED`
+ entry. The corresponding `TTLEntry` is deleted and not stored in the Hot
+ Archive.
+2. Whenever a `PERSISTENT` entry is deleted as part of transaction execution
+ (not deleted via eviction event), the key is stored in the Hot Archive as a
+   `HOT_ARCHIVE_DELETED` entry iff an ARCHIVED entry with the given entry's
+   key exists in some subtree AST[i] where i < k.
+3. If an archived entry is restored and the entry currently exists in the Hot
+ Archive, the `HOT_ARCHIVE_ARCHIVED` previously stored in the Hot Archive is
+ overwritten by a `HOT_ARCHIVE_LIVE` entry.
+4. If a deleted key is recreated and the deleted key currently exists in the
+ Hot Archive, the `HOT_ARCHIVE_DELETED` previously stored in the Hot Archive
+ is overwritten by a `HOT_ARCHIVE_LIVE` entry.
+
+For Bucket merges, the newest version of a given key is always taken. At the
+bottom level, `HOT_ARCHIVE_LIVE` entries are dropped. The `HOT_ARCHIVE_LIVE`
+state indicates that the given key currently exists in the Live BucketList.
+Thus, any Hot Archive reference is out of date and can be dropped.
+
+The current Hot Archive is published to the History Archive via the "history"
+category on every checkpoint ledger.
+
+Whenever the size of the Hot Archive exceeds `archivalSnapshotSize`, the given
+AST subtree becomes immutable and is added to the `Pending Cold Archive Queue`,
+a FIFO queue holding all full AST subtrees that are "waiting" to become the
+current Cold Archive. At this point a new, empty Hot Archive is initialized and
+the current Archival Epoch is incremented to begin constructing the next
+subtree.
-Every BucketList in the `Pending Cold Archive Queue` is published to the History Archive in the "history" category on every checkpoint ledger.
-Note that no Bucket files will actually be published, as the Pending Archival Snapshot is immutable.
+Every BucketList in the `Pending Cold Archive Queue` is published to the
+History Archive in the "history" category on every checkpoint ledger. Note that
+no Bucket files will actually be published, as the Pending Archival Snapshot is
+immutable.
#### Pending Cold Archive Queue
-Whenever an AST subtree enters the `Pending Cold Archive Queue`,
-it is converted into a single Bucket of type Cold Archive over many ledgers in a background thread. The full Merkle tree
-for the given subtree will eventually be built on top of this single Bucket. Merging the pending AST BucketList
-into a single Bucket before beginning Merkle tree construction allows for significantly more efficient History Archives.
-This involves merging the pending AST of type Hot Archive BucketList into a single Bucket as follows:
+Whenever an AST subtree enters the `Pending Cold Archive Queue`, it is
+converted into a single Bucket of type Cold Archive over many ledgers in a
+background thread. The full Merkle tree for the given subtree will eventually
+be built on top of this single Bucket. Merging the pending AST BucketList into
+a single Bucket before beginning Merkle tree construction allows for
+significantly more efficient History Archives. This involves merging the
+pending AST of type Hot Archive BucketList into a single Bucket as follows:
1. Merge all levels of the Hot Archive BucketList into a single Bucket
-2. Initialize a Cold Archive Bucket with a lower and upper bound entry of type `COLD_ARCHIVE_BOUNDARY_LEAF`.
-3. Insert every entry from the merged Hot Archive BucketList into the Cold Archive Bucket:
- - Entries of type `HOT_ARCHIVE_ARCHIVED` will be converted to entries of type `COLD_ARCHIVE_ARCHIVED_LEAF`.
- - Entries of type `HOT_ARCHIVE_DELETED` will be converted to entries of type `COLD_ARCHIVE_DELETED_LEAF`.
-
-Note that the initial Archival Snapshot has no entries of type `COLD_ARCHIVE_HASH`. This conversion is necessary because all
+2. Initialize a Cold Archive Bucket with a lower and upper bound entry of type
+ `COLD_ARCHIVE_BOUNDARY_LEAF`.
+3. Insert every entry from the merged Hot Archive BucketList into the Cold
+ Archive Bucket:
+ - Entries of type `HOT_ARCHIVE_ARCHIVED` will be converted to entries of
+ type `COLD_ARCHIVE_ARCHIVED_LEAF`.
+ - Entries of type `HOT_ARCHIVE_DELETED` will be converted to entries of type
+ `COLD_ARCHIVE_DELETED_LEAF`.
+
+Note that the initial Archival Snapshot has no entries of type
+`COLD_ARCHIVE_HASH`. This conversion is necessary because all
`ColdArchiveBucketEntry` must contain their index in the given Bucket.
-During this time, the Archival Filter for the given AST subtree is also generated.
+During this time, the Archival Filter for the given AST subtree is also
+generated.
-After entering the `Pending Cold Archive Queue`, a pending AST subtree becomes the current Cold Archive
-when the following conditions are met:
+After entering the `Pending Cold Archive Queue`, a pending AST subtree becomes
+the current Cold Archive when the following conditions are met:
-1. At least `numLedgersToInitSnapshot` have passed since the pending AST subtree entered the queue
+1. At least `numLedgersToInitSnapshot` have passed since the pending AST
+ subtree entered the queue
2. No Cold Archive currently exists
3. The given pending AST subtree is the oldest in the queue
-If a merge has not completed on the ledger in which an AST subtree must leave the queue and initialize the Cold Archive,
-the validator will block until the merge is complete. This is similar behavior to background Bucket merges today, where
-the specific merge timing and speed is an implementation detail not part of protocol. Similar to Bucket merge schedules
-today, very conservative time will be alloted for merges. However, if a new node is joining the network on the ledger
-before the Cold Archive is set to be initialized, the node may lose sync with the network initially. However, this is
-currently the case today with normal BucketList background merges and has not presented any significant issues.
+If a merge has not completed on the ledger in which an AST subtree must leave
+the queue and initialize the Cold Archive, the validator will block until the
+merge is complete. This is similar behavior to background Bucket merges today,
+where the specific merge timing and speed is an implementation detail not part
+of protocol. Similar to Bucket merge schedules today, very conservative time
+will be allotted for merges. However, if a new node is joining the network on
+the ledger before the Cold Archive is set to be initialized, the node may lose
+sync with the network initially. That said, this is already the case with
+normal BucketList background merges and has not presented any significant
+issues.
#### Cold Archive
-The Cold Archive contains entries of type `ColdArchiveBucketEntryType` and represents the complete Merkle tree for a given AST subtree.
-The Cold Archive is initialized from a single, pre-existing Cold Archive Bucket constructed in the Pending Cold Archive Queue. This
-single Bucket contains all leaf nodes for the given Merkle tree, but does not yet contain any intermediate hash nodes.
-
-On the ledger in which the Cold Archive is initialized, the initial Cold Archive Bucket is published to history as the canonical
-"Archival Snapshot" for the given epoch. The single leaf Bucket is published in the `archivalsnapshot` category of the History Archives.
-Unlike Bucket snapshots in the `history` category of the History Archives, this snapshot in not used by validators to join the network.
-Instead, it is used to initialize AST Merkle trees for downstream systems in order to produce proofs.
-
-After being initialized with only leaf nodes, the Cold Archive slowly constructs a Merkle tree on a deterministic schedule over many ledgers.
-Each ledger, a background thread will hash up to `maxEntriesToHash` entries and `maxBytesToHash` bytes, inserting the required `COLD_ARCHIVE_HASH`
-entries. The current iterator position is recorded in `NetworkConfigSettings` via the `coldArchiveIterLevel` and `coldArchiveIterIndex` fields.
-
-The current Cold Archive is published to the History Archive via the "history" category on every checkpoint ledger. Note that this is different
-from the Archival Snapshot published on initialization. Since the Cold Archive is part of the BucketList hash used in consensus, it must be
-included in BucketList checkpoints.
-
-The Cold Archive should only every have a single version for each key, such that there does not need to be any special Bucket merge logic.
-
-Once the root Merkle node has been created, the root hash is persisted in the live state BucketList and the current Cold Archive is dropped.
+The Cold Archive contains entries of type `ColdArchiveBucketEntryType` and
+represents the complete Merkle tree for a given AST subtree. The Cold Archive
+is initialized from a single, pre-existing Cold Archive Bucket constructed in
+the Pending Cold Archive Queue. This single Bucket contains all leaf nodes for
+the given Merkle tree, but does not yet contain any intermediate hash nodes.
+
+On the ledger in which the Cold Archive is initialized, the initial Cold
+Archive Bucket is published to history as the canonical "Archival Snapshot" for
+the given epoch. The single leaf Bucket is published in the `archivalsnapshot`
+category of the History Archives. Unlike Bucket snapshots in the `history`
+category of the History Archives, this snapshot is not used by validators to
+join the network. Instead, it is used to initialize AST Merkle trees for
+downstream systems in order to produce proofs.
+
+After being initialized with only leaf nodes, the Cold Archive slowly
+constructs a Merkle tree on a deterministic schedule over many ledgers. Each
+ledger, a background thread will hash up to `maxEntriesToHash` entries and
+`maxBytesToHash` bytes, inserting the required `COLD_ARCHIVE_HASH` entries. The
+current iterator position is recorded in `NetworkConfigSettings` via the
+`coldArchiveIterLevel` and `coldArchiveIterIndex` fields.
+
+The current Cold Archive is published to the History Archive via the "history"
+category on every checkpoint ledger. Note that this is different from the
+Archival Snapshot published on initialization. Since the Cold Archive is part
+of the BucketList hash used in consensus, it must be included in BucketList
+checkpoints.
+
+The Cold Archive should only ever have a single version for each key, such
+that there does not need to be any special Bucket merge logic.
+
+Once the root Merkle node has been created, the root hash is persisted in the
+live state BucketList and the current Cold Archive is dropped.
#### Writing Deleted key to the AST
-In order to prevent double restoration attacks, some deleted keys must be written to the AST to enforce proofs-of-nonexistence. However,
-writing deleted keys can be optimized, and not all deletion events require writing the deleted key.
-
-Suppose an entry is archived in epoch i, restored, then deleted. The goal is to prevent restoring this entry from epoch i again. If a
-DELETED entry for the given key is written in some subtree AST[k] for k > i, restorations will fail, as no proof-of-nonexistence for the
-key will exist for AST[k]. However, if an entry being deleted has not previously been archived, there is no reason to write the deletion
-event, as no double restores can occur. Thus, we only write a DELETED entry if some ARCHIVED entry exists for that key in the archive.
-
-This can be implemented by checking binary fuse filters. When a persistent entry is deleted, the validator will check the key against
-all Archival Filters for all subtrees, as well as check the Hot Archive and any pending Hot Archives for an ARCHIVED entry with
-the same key. If any binary fuse filter query indicates the existence of an ARCHIVED entry, or if an ARCHIVED entry exists in any Hot Archive,
-the deleted key must be written. If no binary fuse filter indicates this, no DELETED key must be written. While a filter false positive may
-occasionally require writing a DELETED key when no ARCHIVED entry actually exists, a DELETED entry will always be written iff an ARCHIVED
-entry exists.
-
-Typically, it seems like PERSISTENT entries are seldom deleted. Most deletion seem to occur briefly after creation. For example, a proposed
-DEX trading protocol creates temporary intermediary accounts to store liquidity when transaction between the classic DEX and Soroban based
-DEXs. Since this intermediary account stores token balances, it is not appropriate for he TEMPORARY durability class. However, these PERSISTENT
-storage entries are usually quickly deleted after their construction.
+In order to prevent double restoration attacks, some deleted keys must be
+written to the AST to enforce proofs-of-nonexistence. However, writing deleted
+keys can be optimized, and not all deletion events require writing the deleted
+key.
+
+Suppose an entry is archived in epoch i, restored, then deleted. The goal is to
+prevent restoring this entry from epoch i again. If a DELETED entry for the
+given key is written in some subtree AST[k] for k > i, restorations will fail,
+as no proof-of-nonexistence for the key will exist for AST[k]. However, if an
+entry being deleted has not previously been archived, there is no reason to
+write the deletion event, as no double restores can occur. Thus, we only write
+a DELETED entry if some ARCHIVED entry exists for that key in the archive.
+
+This can be implemented by checking binary fuse filters. When a persistent
+entry is deleted, the validator will check the key against all Archival Filters
+for all subtrees, as well as check the Hot Archive and any pending Hot Archives
+for an ARCHIVED entry with the same key. If any binary fuse filter query
+indicates the existence of an ARCHIVED entry, or if an ARCHIVED entry exists in
+any Hot Archive, the deleted key must be written. If no binary fuse filter
+indicates this, no DELETED key must be written. While a filter false positive
+may occasionally require writing a DELETED key when no ARCHIVED entry actually
+exists, a DELETED entry will always be written iff an ARCHIVED entry exists.
+
+Typically, it seems like PERSISTENT entries are seldom deleted. Most deletions
+seem to occur shortly after creation. For example, a proposed DEX trading
+protocol creates temporary intermediary accounts to store liquidity when
+transacting between the classic DEX and Soroban-based DEXs. Since this
+intermediary account stores token balances, it is not appropriate for the
+TEMPORARY durability class. However, these PERSISTENT storage entries are
+usually quickly deleted after their construction.
#### Changes to History Archives
@@ -962,21 +1055,25 @@ pendingColdArchiveBuckets: an array of arrays containing an encoding of the pend
coldArchiveBuckets: an array containing an encoding of cold archive bucket list for this ledger
```
-Bucket files for the `hotArchiveBuckets`, `pendingColdArchiveBuckets` and `coldArchiveBuckets` will be stored
-just as Bucket files are currently stored.
+Bucket files for the `hotArchiveBuckets`, `pendingColdArchiveBuckets` and
+`coldArchiveBuckets` will be stored just as Bucket files are currently stored.
-In addition to these changes, a new `archivalsnapshot` category will be added. This directory will contain
-all Archival Snapshots, categorized by Archival Epoch, in the following format:
+In addition to these changes, a new `archivalsnapshot` category will be added.
+This directory will contain all Archival Snapshots, categorized by Archival
+Epoch, in the following format:
-Each Archival Epoch will have a directory in the form `archivalsnapshot/ww/xx/yy/` where
-`0xwwxxyyzz` is the 32bit ledger sequence number for the Archival Epoch at which the file was written,
-expressed as an 8 hex digit, lower-case ASCII string. This directory will store a single Bucket file of the
-form `archivalsnapshot/ww/xx/yy/archivalsnapshot-wwxxyyzz.xdr.gz`. While this file does not contain a complete
-Merkle tree, it contains all necessary information to generate `AST[0xwwxxyyzz]`.
+Each Archival Epoch will have a directory in the form
+`archivalsnapshot/ww/xx/yy/` where `0xwwxxyyzz` is the 32-bit ledger sequence
+number for the Archival Epoch at which the file was written, expressed as an 8
+hex digit, lower-case ASCII string. This directory will store a single Bucket
+file of the form `archivalsnapshot/ww/xx/yy/archivalsnapshot-wwxxyyzz.xdr.gz`.
+While this file does not contain a complete Merkle tree, it contains all
+necessary information to generate `AST[0xwwxxyyzz]`.
#### Changes to LedgerHeader
-While the XDR structure of `LedgerHeader` remains unchanged, `bucketListHash` will be changed to the following:
+While the XDR structure of `LedgerHeader` remains unchanged, `bucketListHash`
+will be changed to the following:
```
header.bucketListHash = SHA256(liveStateBucketListHash,
@@ -988,24 +1085,26 @@ header.bucketListHash = SHA256(liveStateBucketListHash,
#### InvokeHostFunctionOp
-The most notable user facing difference in `InvokeHostFunctionOp` is a requirement for occasional
-"invocation proofs". If
-a key is being created for the first time, validators must check all archival filters to ensure the
-key does not already exist in the archive. Since archival filters are probabilistic, occasionally there
-may be a false positive, such that the archival filter indicates a key exists in the archive when it
-does not. To "override" this false positive, an "invocation proof" must be included in the transaction.
-An invocation proof is a proof of nonexistence for a given key, proving that a false positive occurred
-in the archival filter for the key.
+The most notable user facing difference in `InvokeHostFunctionOp` is a
+requirement for occasional "invocation proofs". If a key is being created for
+the first time, validators must check all archival filters to ensure the key
+does not already exist in the archive. Since archival filters are
+probabilistic, occasionally there may be a false positive, such that the
+archival filter indicates a key exists in the archive when it does not. To
+"override" this false positive, an "invocation proof" must be included in the
+transaction. An invocation proof is a proof of nonexistence for a given key,
+proving that a false positive occurred in the archival filter for the key.
-Invocation proofs, if required, must be included in `proofs` struct inside `SorobanTransactionData` for
-the given transaction.
+Invocation proofs, if required, must be included in the `proofs` struct inside
+`SorobanTransactionData` for the given transaction.
Outside of this user facing change, operation behavior changes as follows.
-Prior to entering the VM, there will be an "archival phase" performed while loading entries from disk.
-The archival phase is responsible for:
+Prior to entering the VM, there will be an "archival phase" performed while
+loading entries from disk. The archival phase is responsible for:
1. Validating proofs of nonexistence for newly created entries, and
-2. Enforcing archival policies, i.e. failing the TX if the footprint contains an archived entry that has not been restored.
+2. Enforcing archival policies, i.e. failing the TX if the footprint contains
+ an archived entry that has not been restored.
The archival phase works as follows:
@@ -1035,17 +1134,19 @@ for key in footprint.readWrite:
failTx()
```
-Following this new archival phase, `InvokeHostFunctionOp` will function identically to today.
-There are no archival specific fees for creating a new entry even if a filter miss occurs
-and a proof must be verified (see No Archival Fees for Entry Creation).
+Following this new archival phase, `InvokeHostFunctionOp` will function
+identically to today. There are no archival specific fees for creating a new
+entry even if a filter miss occurs and a proof must be verified (see No
+Archival Fees for Entry Creation).
#### RestoreFootprintOp
-`RestoreFootprintOp` will now be required to provide "restoration proofs" for any archived entry that
-is not currently stored by the validators. The operation can restore a mix of entries that
-are archived but currently stored by validators and those that are not stored by validators.
-Restoration proofs are included in the `proofs` struct inside `SorobanTransactionData` for
-the given transaction. Restore proof verification works as follows:
+`RestoreFootprintOp` will now be required to provide "restoration proofs" for
+any archived entry that is not currently stored by the validators. The
+operation can restore a mix of entries that are archived but currently stored
+by validators and those that are not stored by validators. Restoration proofs
+are included in the `proofs` struct inside `SorobanTransactionData` for the
+given transaction. Restore proof verification works as follows:
```
// readOnly must be empty
@@ -1078,27 +1179,29 @@ for key in footprint.readWrite:
failTx()
```
-
-`RestoreFootprintOp` will now require `instruction` resources, metered based on the complexity
-of verifying the given proofs of inclusion. There are no additional fees or resources
-required for proofs of nonexistence.
+`RestoreFootprintOp` will now require `instruction` resources, metered based on
+the complexity of verifying the given proofs of inclusion. There are no
+additional fees or resources required for proofs of nonexistence.
#### Meta
-Meta will contain the current Archival Epoch via `currentArchivalEpoch`. This will be useful
-for downstream diagnostics such as Hubble. Meta will also contain the oldest Archival Snapshot
-persisted on validators via `lastArchivalEpochPersisted`. This will be useful to RPC preflight,
-as it is the cutoff point at which a proof will need to be generated for `RestoreFootprintOp`.
-
-Whenever a `PERSISTENT` entry is evicted (i.e. removed from the Live State BucketList and added to the Hot Archive),
-the entry key and its associated TTL key will be emitted via `evictedLedgerKeys`
-(this field has been renamed and was previously named `evictedTemporaryLedgerKeys`).
+Meta will contain the current Archival Epoch via `currentArchivalEpoch`. This
+will be useful for downstream diagnostics such as Hubble. Meta will also
+contain the oldest Archival Snapshot persisted on validators via
+`lastArchivalEpochPersisted`. This will be useful to RPC preflight, as it is
+the cutoff point at which a proof will need to be generated for
+`RestoreFootprintOp`.
-Whenever an entry is restored via `RestoreFootprintOp`, the `LedgerEntry` being restored and its
-associated TTL will be emitted as a `LedgerEntryChange` of type `LEDGER_ENTRY_RESTORE`. Note that
-entries being restored may not have been evicted such that entries currently in the Live State BucketList
-can see `LEDGER_ENTRY_RESTORE` events.
+Whenever a `PERSISTENT` entry is evicted (i.e. removed from the Live State
+BucketList and added to the Hot Archive), the entry key and its associated TTL
+key will be emitted via `evictedLedgerKeys` (this field has been renamed and
+was previously named `evictedTemporaryLedgerKeys`).
+Whenever an entry is restored via `RestoreFootprintOp`, the `LedgerEntry` being
+restored and its associated TTL will be emitted as a `LedgerEntryChange` of
+type `LEDGER_ENTRY_RESTORE`. Note that entries being restored may not have been
+evicted, so entries currently in the Live State BucketList can see
+`LEDGER_ENTRY_RESTORE` events.
#### Default Values for Network Configs
@@ -1115,19 +1218,21 @@ These are very modest, arbitrary starting limits that can be increased later.
uint32 archivalSnapshotSize = 100;
```
-Long term, 100 entries per archival snapshot is significantly smaller than is optimal.
-However, given that there is currently a small number of expired `PERSISTENT` Soroban entries
-(approximately 50 at the time of this writing), a small initial snapshot size will
-allow us to evict the first round of persistent entries as a sort of "test run" for early
-adopters before increasing this to a larger, more reasonable value.
+Long term, 100 entries per archival snapshot is significantly smaller than is
+optimal. However, given that there is currently a small number of expired
+`PERSISTENT` Soroban entries (approximately 50 at the time of this writing), a
+small initial snapshot size will allow us to evict the first round of
+persistent entries as a sort of "test run" for early adopters before increasing
+this to a larger, more reasonable value.
```
// Number of levels in Archival Snapshot BucketList
uint32 archivalSnapshotDepth = 4;
```
-This value should be increased to scale with `archivalSnapshotSize`. Since there is
-a very low initial `archivalSnapshotSize` value, this value should also begin very low.
+This value should be increased to scale with `archivalSnapshotSize`. Since
+there is a very low initial `archivalSnapshotSize` value, this value should
+also begin very low.
```
// The number of ledger between which the Hot Archive becomes full
@@ -1136,54 +1241,73 @@ a very low initial `archivalSnapshotSize` value, this value should also begin ve
uint32 numLedgersToInitSnapshot = 1000; // 1.5 hours
```
-Currently, even the largest Bucket merge events take less than 10 minutes. This value
-is very conservative in order to support many different SKUs and provide flexibility
-to instances that may have burst limited storage mediums.
+Currently, even the largest Bucket merge events take less than 10 minutes. This
+value is very conservative in order to support many different SKUs and provide
+flexibility to instances that may have burst limited storage mediums.
## Design Rationale
### No Archival Fees for Entry Creation
-While additional fees are charged for verifying proofs of existence, no fees are charged for validating proofs of nonexistence.
-There are several potential ways to break smart contracts should we charge for proofs of nonexistence:
-
-1. Meter instructions for each binary fuse filter lookup. As the age of the network increases and more Archival Snapshots
-are produced, the number of filter lookups, and thus the instruction cost, increase linearly. This means that a transaction
-that could fit within transaction instruction limits at a given epoch may not be valid in a future epoch. This is a significant
-footgun, as developers wish to maximize transaction complexity according to the current limits.
-
-2. Meter instructions for proof of nonexistence verification. Proofs of nonexistence should only be required very rarely,
-with the default false positive rate set to 1 out of a billion. This means that it is highly unlikely contract developers
-will account for the possibility of nonexistence proofs when writing contracts. A sufficiently complex contract may be close
-to transaction instruction limits in the "happy path," where no proof is required. Should this contract attempt to create a
-new key in the "unhappy path," the cost of proof verification will exceed transaction limits. This essentially bricks the
-contract, making the otherwise valid transaction invalid due to a highly unlikely probabilistic event. Should developers
-account for this case, they would be under utilizing available transaction limits for a highly unlikely edge case.
-
-At a given epoch, every new entry creation will require the same amount of filter instructions (assuming no filter misses).
-Several control mechanisms already exist to control entry creation price, such as write fees and minimum rent balances. Given
-potential footguns and that control mechanisms on entry creation already exists, there is no need for a filter specific resource
+While additional fees are charged for verifying proofs of existence, no fees
+are charged for validating proofs of nonexistence. There are several potential
+ways to break smart contracts should we charge for proofs of nonexistence:
+
+1. Meter instructions for each binary fuse filter lookup. As the age of the
+ network increases and more Archival Snapshots are produced, the number of
+ filter lookups, and thus the instruction cost, increase linearly. This means
+ that a transaction that could fit within transaction instruction limits at a
+ given epoch may not be valid in a future epoch. This is a significant
+ footgun, as developers wish to maximize transaction complexity according to
+ the current limits.
+
+2. Meter instructions for proof of nonexistence verification. Proofs of
+ nonexistence should only be required very rarely, with the default false
+ positive rate set to 1 out of a billion. This means that it is highly
+ unlikely contract developers will account for the possibility of
+ nonexistence proofs when writing contracts. A sufficiently complex contract
+ may be close to transaction instruction limits in the "happy path," where no
+ proof is required. Should this contract attempt to create a new key in the
+ "unhappy path," the cost of proof verification will exceed transaction
+ limits. This essentially bricks the contract, making the otherwise valid
+ transaction invalid due to a highly unlikely probabilistic event. Should
+ developers account for this case, they would be underutilizing available
+ transaction limits for a highly unlikely edge case.
+
+At a given epoch, every new entry creation will require the same amount of
+filter instructions (assuming no filter misses). Several control mechanisms
+already exist to control entry creation price, such as write fees and minimum
+rent balances. Given potential footguns and that control mechanisms on entry
+creation already exist, there is no need for a filter-specific resource
charge.
-As for filter misses, it seems highly unlikely they could be abused to DOS the network. While the instruction consumption
-of proofs of exclusion is not metered, the transactions still have significant cost due to the increased transaction size
-incurred by including a proof. Finally the actual compute cost of verifying proofs is not significant, as it requires no
-disk lookups and only a small handful in-memory hash operations. The transaction size cost incurred by the attacker would
-significantly outweigh the network cost of the non metered computation.
+As for filter misses, it seems highly unlikely they could be abused to DOS the
+network. While the instruction consumption of proofs of exclusion is not
+metered, the transactions still have significant cost due to the increased
+transaction size incurred by including a proof. Finally, the actual compute cost
+of verifying proofs is not significant, as it requires no disk lookups and only
+a small handful of in-memory hash operations. The transaction size cost incurred
+by the attacker would significantly outweigh the network cost of the
+non-metered computation.
### Expectations for Downstream Systems
-AST subtrees are deeply tied to BucketList structure. While `stellar-core` has an integrated high performance database
-built on top of the BucketList, there does not yet exist a standalone BucketList database. For this reason, all queries
-regarding archival state and proof production will be managed by `stellar-core`.
+AST subtrees are deeply tied to BucketList structure. While `stellar-core` has
+an integrated high performance database built on top of the BucketList, there
+does not yet exist a standalone BucketList database. For this reason, all
+queries regarding archival state and proof production will be managed by
+`stellar-core`.
-RPC instances preflighting transactions are expected to use the following `captive-core` endpoints when interacting with
-archived state:
+RPC instances preflighting transactions are expected to use the following
+`captive-core` endpoints when interacting with archived state:
-1. `getledgerentry` Given a set of keys, this endpoint will return the corresponding `LedgerEntry` along with archival
-related information. Specifically, this endpoint will return whether the entry is archived, and if it is archived, whether
-or not it requires a proof to be restored. Additionally, if a given key does not exist, this endpoint will return whether or
-not the entry creation requires a proof of nonexistence. An example return payload would be as follows:
+1. `getledgerentry` Given a set of keys, this endpoint will return the
+ corresponding `LedgerEntry` along with archival related information.
+ Specifically, this endpoint will return whether the entry is archived, and
+ if it is archived, whether or not it requires a proof to be restored.
+ Additionally, if a given key does not exist, this endpoint will return
+ whether or not the entry creation requires a proof of nonexistence. An
+ example return payload would be as follows:
```
{
@@ -1224,77 +1348,102 @@ not the entry creation requires a proof of nonexistence. An example return paylo
}
```
-2. `getinvokeproof` Given a set of keys that do not currently exist, this endpoint will return an `ArchivalProof`
-sufficient to prove the entries for the purposes of `InvokeHostFunctionOp`. This only returns proofs for new entry
-creation, not archived entry restoration.
+2. `getinvokeproof` Given a set of keys that do not currently exist, this
+ endpoint will return an `ArchivalProof` sufficient to prove the entries for
+ the purposes of `InvokeHostFunctionOp`. This only returns proofs for new
+ entry creation, not archived entry restoration.
-3. `getrestoreproof` Given a set of archived keys, this endpoint will return an `ArchivalProof`
-sufficient to prove the entries for the purposes of `RestoreFootprintOp`. This only returns proofs for archived
-entry restoration, not new entry creation.
+3. `getrestoreproof` Given a set of archived keys, this endpoint will return an
+ `ArchivalProof` sufficient to prove the entries for the purposes of
+ `RestoreFootprintOp`. This only returns proofs for archived entry
+ restoration, not new entry creation.
### History Archive
-In order for validators to join the network efficiently, all BucketLists required for consensus are stored in the
-History Archive State. While this does increase the size of the History Archives in the short term, there is no
-strong reason that History Archive State files must be persisted permanently at 64 ledger resolution. In the future,
-Tier 1 policy could be modified such that only resent History Archive State files need to be maintained at checkpoint
-resolution to assist in node catchup, while older History Archive State files could be dropped or stored at a lower resolution.
-Note that this does not impact the audibility of the network, as all transaction history will still be maintained.
-
-In the long term, the only State Archival files that will need to be persisted permanently in the archive are Archive
-Snapshot files. In order to keep this persisted data as small as possible, only the leaf nodes of these files are stored.
-This provides all the necessary information to construct the full Merkle tree required to generate proofs, but is significantly
-smaller. There is no security risk either, as after a full Merkle tree has been constructed from these files, the root can
-be checked against the roots stored in the validator's current live state.
-
-In addition to saving disk space, this strategy has the added advantage of reducing Archival Snapshot egress. Currently,
-egress fees are the dominating cost of History Archives. Should a complete Merkle tree exist in the History Archive, it
-would be easy to develop a lightweight application to generate a proof directly from the History Archive file, bypassing
-RPC nodes. There have been previous issues with similar applications abusing public History Archives, where large files are
-constantly downloaded and subsequently deleted repeatedly by such programs. By only storing leaf nodes, it is necessary for an
-RPC instance to perform an initialization step before generating proofs. While this step will still be relatively fast and
-cheap, it encourages proper data hygiene, protecting History Archive providers.
+In order for validators to join the network efficiently, all BucketLists
+required for consensus are stored in the History Archive State. While this does
+increase the size of the History Archives in the short term, there is no strong
+reason that History Archive State files must be persisted permanently at 64
+ledger resolution. In the future, Tier 1 policy could be modified such that
+only recent History Archive State files need to be maintained at checkpoint
+resolution to assist in node catchup, while older History Archive State files
+could be dropped or stored at a lower resolution. Note that this does not
+impact the auditability of the network, as all transaction history will still be
+maintained.
+
+In the long term, the only State Archival files that will need to be persisted
+permanently in the archive are Archive Snapshot files. In order to keep this
+persisted data as small as possible, only the leaf nodes of these files are
+stored. This provides all the necessary information to construct the full
+Merkle tree required to generate proofs, but is significantly smaller. There is
+no security risk either, as after a full Merkle tree has been constructed from
+these files, the root can be checked against the roots stored in the
+validator's current live state.
+
+In addition to saving disk space, this strategy has the added advantage of
+reducing Archival Snapshot egress. Currently, egress fees are the dominating
+cost of History Archives. Should a complete Merkle tree exist in the History
+Archive, it would be easy to develop a lightweight application to generate a
+proof directly from the History Archive file, bypassing RPC nodes. There have
+been previous issues with similar applications abusing public History Archives,
+where large files are constantly downloaded and subsequently deleted repeatedly
+by such programs. By only storing leaf nodes, it is necessary for an RPC
+instance to perform an initialization step before generating proofs. While this
+step will still be relatively fast and cheap, it encourages proper data
+hygiene, protecting History Archive providers.
### Long Term RPC Sustainability
-The AST has been designed such that an RPC node may store as many or as few Archival Snapshots as the operator desires.
-For example, an RPC provider may only serve the last two years worth of Archival Snapshots, such that any archived entry
-older than two years old cannot be restored by that node. Additionally, RPC providers may shard Archival Snapshots across
-many different nodes and route requests accordingly.
+The AST has been designed such that an RPC node may store as many or as few
+Archival Snapshots as the operator desires. For example, an RPC provider may
+only serve the last two years worth of Archival Snapshots, such that any
+archived entry older than two years old cannot be restored by that node.
+Additionally, RPC providers may shard Archival Snapshots across many different
+nodes and route requests accordingly.
-While this is possible long term, the size of the Archival Snapshots will not be significant enough to warrant this
-additional complexity. For the forseeable future, RPC providers should expect to maintain all Archival Snapshots. However,
-this does not need to be the case in the future should Archival State grow significantly.
+While this is possible long term, the size of the Archival Snapshots will not
+be significant enough to warrant this additional complexity. For the foreseeable
+future, RPC providers should expect to maintain all Archival Snapshots.
+However, this does not need to be the case in the future should Archival State
+grow significantly.
### Calculating BucketList Size for Fees
-It is not immediately clear whether or not BucketLists other than the Live State BucketList should
-count towards BucketList size used in fee calculations. The Live State BucketList is a leading
-indicator of network state usage, while archival related BucketLists are lagging indicators. By counting
-archival related BucketLists in fee calculation, the network is penalized more from validator
-implementation details then penalized for actual network usage. Additionally, archival related
-BucketLists will introduce significant fee volatility on the network, as entire BucketLists will
-be dropped whenever a Merkle hash is generated. These reasons support only counting Live State BucketList
-size towards fees.
-
-However, the primary purpose behind size related fees is to apply back pressure when validator disk usage
-is high. Archival related BucketLists are stored on disk and therefore contribute to the storage issue,
-such that they should be included in fee calculations to provide appropriate back pressure.
-
-Ideally, archival related BucketLists should not count towards fees to provide the best experience to
-network users. The rate of Archival Snapshot production and root hash production should outpace that
-of eviction and entry creation. Should this be the case, there is no need for additional size related fees.
-Additionally, adding more complexity to BucketList size fee calculation will further complicate simulation,
-which has historically has issues with respect to fee estimation.
-
-Finally, because of the current relatively low Soroban usage rate compared to Stellar classic, classic
-entries dominate BucketList growth anyway. Soroban entries make up a small fraction of ledger state, so
-including archival BucketLists in size calculations would only mildly effect overall fees.
-Given that counting archival BucketLists towards fees provides a worse user experience, more complexity
-for downstream systems, and provides little actual benefit with current usage rates, no archival BucketLists
-will be included in fee calculation at this time. This means the current write fee calculation will remain
-unchanged and only depend on the Live BucketList size. Should this prove to be an issue in the future, another
-protocol upgrade may change fee calculations.
+It is not immediately clear whether or not BucketLists other than the Live
+State BucketList should count towards BucketList size used in fee calculations.
+The Live State BucketList is a leading indicator of network state usage, while
+archival related BucketLists are lagging indicators. By counting archival
+related BucketLists in fee calculation, the network is penalized more from
+validator implementation details than penalized for actual network usage.
+Additionally, archival related BucketLists will introduce significant fee
+volatility on the network, as entire BucketLists will be dropped whenever a
+Merkle hash is generated. These reasons support only counting Live State
+BucketList size towards fees.
+
+However, the primary purpose behind size related fees is to apply back pressure
+when validator disk usage is high. Archival related BucketLists are stored on
+disk and therefore contribute to the storage issue, such that they should be
+included in fee calculations to provide appropriate back pressure.
+
+Ideally, archival related BucketLists should not count towards fees to provide
+the best experience to network users. The rate of Archival Snapshot production
+and root hash production should outpace that of eviction and entry creation.
+Should this be the case, there is no need for additional size related fees.
+Additionally, adding more complexity to BucketList size fee calculation will
+further complicate simulation, which has historically had issues with respect
+to fee estimation.
+
+Finally, because of the current relatively low Soroban usage rate compared to
+Stellar classic, classic entries dominate BucketList growth anyway. Soroban
+entries make up a small fraction of ledger state, so including archival
+BucketLists in size calculations would only mildly affect overall fees. Given
+that counting archival BucketLists towards fees provides a worse user
+experience, more complexity for downstream systems, and provides little actual
+benefit with current usage rates, no archival BucketLists will be included in
+fee calculation at this time. This means the current write fee calculation will
+remain unchanged and only depend on the Live BucketList size. Should this prove
+to be an issue in the future, another protocol upgrade may change fee
+calculations.
## Security Concerns
@@ -1302,43 +1451,56 @@ Many attack vectors have been considered, including the following:
### Double Nonce
-Consider a one-time nonce created at a deterministic address (a common design practice for minting NFTs). The
-legitimate owner of the nonce creates the entry in Archival Epoch N, and the nonce entry is subsequently archived
-and evicted. Much later in epoch N + 5, and attacker attempts to recreate the nonce at the same address, but this time
-with the attacker as owner. The validators do not currently store the original nonce entry, and have no way of knowing
-the nonce exists and is owned by another entity. Because the validators believe this entry is being created for the first
-time, the nonce is recreated and the attacker now has established control.
-
-To defeat this, proofs of nonexistence are required when creating an entry for the first time. In the scenario above,
-because the nonce exists in the archive, no valid proof of nonexistence for the key exists, and the attacker would not
+Consider a one-time nonce created at a deterministic address (a common design
+practice for minting NFTs). The legitimate owner of the nonce creates the entry
+in Archival Epoch N, and the nonce entry is subsequently archived and evicted.
+Much later in epoch N + 5, an attacker attempts to recreate the nonce at the
+same address, but this time with the attacker as owner. The validators do not
+currently store the original nonce entry, and have no way of knowing the nonce
+exists and is owned by another entity. Because the validators believe this
+entry is being created for the first time, the nonce is recreated and the
+attacker now has established control.
+
+To defeat this, proofs of nonexistence are required when creating an entry for
+the first time. In the scenario above, because the nonce exists in the archive,
+no valid proof of nonexistence for the key exists, and the attacker would not
be able to recreate the entry.
### Double Spend via Multiple Restores
-Consider a balance entry with a non-zero balance that has been evicted and is no longer stored by validators. The attacker
-restores the entry, spends the balance, then deletes the balance entry. However, even though the entry has been deleted
-in the Live State, the non-zero version of the balance entry still exists in the archive. The attacker then restores the non-zero
-balance entry again. The entry is added back to the Live State with a non-zero balance, resulting in a double spend.
-
-The defense against this attack is to write deletion events into the Archival Snapshot. When restoring an entry, the proof
-must verify that the key exists in the given Archival Snapshot and does not exist in any newer epoch. Because deletion events
-are written to Archival Snapshots, the attacker will not be able to prove that no newer version of the balance key exists
-(as the deletion key is newer than the non-zero balance entry) defeating the attack.
+Consider a balance entry with a non-zero balance that has been evicted and is
+no longer stored by validators. The attacker restores the entry, spends the
+balance, then deletes the balance entry. However, even though the entry has
+been deleted in the Live State, the non-zero version of the balance entry still
+exists in the archive. The attacker then restores the non-zero balance entry
+again. The entry is added back to the Live State with a non-zero balance,
+resulting in a double spend.
+
+The defense against this attack is to write deletion events into the Archival
+Snapshot. When restoring an entry, the proof must verify that the key exists in
+the given Archival Snapshot and does not exist in any newer epoch. Because
+deletion events are written to Archival Snapshots, the attacker will not be
+able to prove that no newer version of the balance key exists (as the deletion
+key is newer than the non-zero balance entry) defeating the attack.
### Issues with Requiring Authorization for Restoration
-Consider a design where authorization was required for restoration such that only the entry owner could restore an archived
-entry. Consider a loan contract, where an attacker has put up some collateral for a loan. The collateral balance entries are
-archived, and the attacker defaults on the loan. When the loan contract attempts to collect the collateral, the entries are
-archived, and the contract cannot restore the entries to collect collateral.
+Consider a design where authorization was required for restoration such that
+only the entry owner could restore an archived entry. Consider a loan contract,
+where an attacker has put up some collateral for a loan. The collateral balance
+entries are archived, and the attacker defaults on the loan. When the loan
+contract attempts to collect the collateral, the entries are archived, and the
+contract cannot restore the entries to collect collateral.
-To defeat this attack, authorization is not required for restoration. There is no significant security concern with allowing
-arbitrary restoration. Even though any entity can restore an entry, authorization is still required to modify the entry
-once restored.
+To defeat this attack, authorization is not required for restoration. There is
+no significant security concern with allowing arbitrary restoration. Even
+though any entity can restore an entry, authorization is still required to
+modify the entry once restored.
## Test Cases
-Specific test cases are still TBD. Formal verification of proof generation and validation may be useful.
+Specific test cases are still TBD. Formal verification of proof generation and
+validation may be useful.
## Implementation
diff --git a/core/cap-0058.md b/core/cap-0058.md
index f86f84996..2ce0b121a 100644
--- a/core/cap-0058.md
+++ b/core/cap-0058.md
@@ -16,8 +16,8 @@ Protocol version: 22
## Simple Summary
Introduce 'constructor' feature for Soroban smart contracts: a mechanism that
-ensures that the contract will run custom initialization logic atomically
-when it's first created.
+ensures that the contract will run custom initialization logic atomically when
+it's first created.
## Working Group
@@ -25,27 +25,51 @@ As specified in the Preamble.
## Motivation
-Constructor support improves usability of Soroban for the contract and SDK developers:
-
-- A contract with a constructor is guaranteed to always be initialized, so there is never a need to ensure that initialization at run time, thus reducing the contract size, CPU usage, and potentially the contract storage needs
-- Using constructors makes it harder for developers to accidentally make their contracts prone to front-running the initialization (in case if factory is not being used), thus improving security
-- Constructors are supported in other smart contract frameworks/languages (such as Solidity), which both makes Soroban more compatible with the new SDKs (see [discussion](https://github.com/stellar/stellar-protocol/discussions/1501)), and also is more intuitive for developer on-boarding
-- Constructors make contract instantiation cheaper and faster, as only a single transaction is necessary vs multiple transactions or a factory (that requires an additional contract invocation)
+Constructor support improves usability of Soroban for the contract and SDK
+developers:
+
+- A contract with a constructor is guaranteed to always be initialized, so
+  there is never a need to ensure initialization at run time, thus
+ reducing the contract size, CPU usage, and potentially the contract storage
+ needs
+- Using constructors makes it harder for developers to accidentally make their
+ contracts prone to front-running the initialization (in case if factory is
+ not being used), thus improving security
+- Constructors are supported in other smart contract frameworks/languages (such
+ as Solidity), which both makes Soroban more compatible with the new SDKs (see
+ [discussion](https://github.com/stellar/stellar-protocol/discussions/1501)),
+ and also is more intuitive for developer on-boarding
+- Constructors make contract instantiation cheaper and faster, as only a single
+ transaction is necessary vs multiple transactions or a factory (that requires
+ an additional contract invocation)
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects to create highly usable products.
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products.
## Abstract
-This CAP introduces the set of the necessary changes to support defining the constructors and executing them, specifically:
-
-- Reserve a new special contract function `__constructor` that may only be called by the Soroban host environment. The environment will call `__constructor` function if and only if the contract is being created from a Wasm exporting that function
-- Introduce a new host function `create_contract_with_constructor` in Soroban environment that allows constracts to instantiate other contracts with constructors
-- Introduce a new `HostFunction` XDR variant `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` that acts in the same way as `HOST_FUNCTION_TYPE_CREATE_CONTRACT`, but also allows users to specify the constructor arguments
-- Introduce a new `SorobanAuthorizedFunction` XDR variant `SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN` that allows users to sign the authorization payload corresponding to `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` host function calls
+This CAP introduces the set of the necessary changes to support defining the
+constructors and executing them, specifically:
+
+- Reserve a new special contract function `__constructor` that may only be
+ called by the Soroban host environment. The environment will call
+ `__constructor` function if and only if the contract is being created from a
+ Wasm exporting that function
+- Introduce a new host function `create_contract_with_constructor` in Soroban
+  environment that allows contracts to instantiate other contracts with
+ constructors
+- Introduce a new `HostFunction` XDR variant
+ `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` that acts in the same way as
+ `HOST_FUNCTION_TYPE_CREATE_CONTRACT`, but also allows users to specify the
+ constructor arguments
+- Introduce a new `SorobanAuthorizedFunction` XDR variant
+ `SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN` that allows
+ users to sign the authorization payload corresponding to
+ `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` host function calls
## Specification
@@ -68,12 +92,12 @@ index 87dd32d..7da2f1d 100644
+ HOST_FUNCTION_TYPE_UPLOAD_CONTRACT_WASM = 2,
+ HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2 = 3
};
-
+
enum ContractIDPreimageType
@@ -503,6 +504,14 @@ struct CreateContractArgs
ContractExecutable executable;
};
-
+
+struct CreateContractArgsV2
+{
+ ContractIDPreimage contractIDPreimage;
@@ -92,7 +116,7 @@ index 87dd32d..7da2f1d 100644
+case HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2:
+ CreateContractArgsV2 createContractV2;
};
-
+
enum SorobanAuthorizedFunctionType
{
SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN = 0,
@@ -100,7 +124,7 @@ index 87dd32d..7da2f1d 100644
+ SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_HOST_FN = 1,
+ SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN = 2
};
-
+
union SorobanAuthorizedFunction switch (SorobanAuthorizedFunctionType type)
@@ -531,6 +543,8 @@ case SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN:
InvokeContractArgs contractFn;
@@ -109,105 +133,179 @@ index 87dd32d..7da2f1d 100644
+case SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN:
+ CreateContractArgsV2 createContractV2HostFn;
};
-
+
struct SorobanAuthorizedInvocation
---
+--
```
### Semantics
#### Constructor function
-Every Wasm that exports `__constructor` function is considered to have a constructor. `__constructor` function may take an arbitrary number of arbitrary `SCVal`(XDR)/`Val`(Soroban host) arguments (0 arguments are supported as well).
+Every Wasm that exports `__constructor` function is considered to have a
+constructor. `__constructor` function may take an arbitrary number of arbitrary
+`SCVal`(XDR)/`Val`(Soroban host) arguments (0 arguments are supported as well).
The constructor has the following semantics from the Soroban host standpoint:
-- For any Wasm with Soroban environment version less than 22, `__constructor` function is treated as any other unused Soroban 'reserved' function (any contract function that has name starting with double `_`), i.e. it can be exported, but can not be invoked. These contracts are considered to not have any constructor, even past protocol 22.
- - Keep in mind, that it is not possible to upload Wasm with v22 environment prior to v22 protocol upgrade, thus it is guaranteed that for any given Wasm uploaded on-chain either all of, or none of the instances have constructor support (depending on the env version)
-- For any Wasm with Soroban environment version 22 or greater `__constructor` is treated as constructor function:
- - When a new contract is created from that Wasm, the environment calls `__constructor` with user-provided arguments immediately after creating a contract instance entry in the storage (without returning control to the caller)
- - If `__constructor` hasn't finished successfully (i.e. if VM traps, or a function returns an error), creation function fails and all the changes are rolled back
- - If `__constructor` succeeds, but returns a non-error value that is not `Val::Void` (unit-type return value in Rust), then then the creation function is considered to have failed as well
- - If a contract with constructor is instantiated by a function that doesn't allow specifying the constructor arguments, then constuctor is called with 0 arguments.
- - If a contract without constructor has any constructor arguments passed to it (i.e. >=1 arguments), the creation function fails
-
-Other than the semantics described above, `__constructor` behaves as a normal contract function that can manipulate contract storage, do cross-contract calls etc.
-
-`__constructor` must return `Val::VOID` (in the Rust SDK that is equivalent to returning no value). Returning any value that is not `Val::VOID` will result in error and contract won't be instantiated.
+- For any Wasm with Soroban environment version less than 22, `__constructor`
+ function is treated as any other unused Soroban 'reserved' function (any
+ contract function that has name starting with double `_`), i.e. it can be
+ exported, but can not be invoked. These contracts are considered to not have
+ any constructor, even past protocol 22.
+ - Keep in mind, that it is not possible to upload Wasm with v22 environment
+ prior to v22 protocol upgrade, thus it is guaranteed that for any given
+ Wasm uploaded on-chain either all of, or none of the instances have
+ constructor support (depending on the env version)
+- For any Wasm with Soroban environment version 22 or greater `__constructor`
+ is treated as constructor function:
+ - When a new contract is created from that Wasm, the environment calls
+ `__constructor` with user-provided arguments immediately after creating a
+ contract instance entry in the storage (without returning control to the
+ caller)
+ - If `__constructor` hasn't finished successfully (i.e. if VM traps, or a
+ function returns an error), creation function fails and all the changes
+ are rolled back
+ - If `__constructor` succeeds, but returns a non-error value that is not
+    `Val::Void` (unit-type return value in Rust), then the creation
+ function is considered to have failed as well
+ - If a contract with constructor is instantiated by a function that doesn't
+    allow specifying the constructor arguments, then constructor is called with
+ 0 arguments.
+ - If a contract without constructor has any constructor arguments passed to
+ it (i.e. >=1 arguments), the creation function fails
+
+Other than the semantics described above, `__constructor` behaves as a normal
+contract function that can manipulate contract storage, do cross-contract calls
+etc.
+
+`__constructor` must return `Val::VOID` (in the Rust SDK that is equivalent to
+returning no value). Returning any value that is not `Val::VOID` will result in
+error and contract won't be instantiated.
##### 'Default' constructor
-The semantics of contracts that don't have a constructor (i.e. those created before protocol 22 and those that simply don't have `__constructor` defined) can be significantly simplified by treating them as having a 'default' constructor that accepts no arguments and performs no operations. This streamlines the user experience as it makes calling the contracts without constructor to behave exactly the same as contracts with a 0-argument constructor. That's why, for example, it is possible to instantiate a constructor-less contract with a function that expects constructor arguments, but only as long as 0 arguments are passed (passing >0 arguments to the 'default' constructor would cause it to fail).
+The semantics of contracts that don't have a constructor (i.e. those created
+before protocol 22 and those that simply don't have `__constructor` defined)
+can be significantly simplified by treating them as having a 'default'
+constructor that accepts no arguments and performs no operations. This
+streamlines the user experience as it makes calling the contracts without
+constructor behave exactly the same as contracts with a 0-argument
+constructor. That's why, for example, it is possible to instantiate a
+constructor-less contract with a function that expects constructor arguments,
+but only as long as 0 arguments are passed (passing >0 arguments to the
+'default' constructor would cause it to fail).
##### Interaction with contract updates
-Notice, that the above section refers only to creation of the contracts. Every contract may only be created just once (via `create_contract` host function or its `InvokeHostFunctionOp` counterpart). When the contract has its code updated it is not considered created and thus constructor won't be called.
+Notice, that the above section refers only to creation of the contracts. Every
+contract may only be created just once (via `create_contract` host function or
+its `InvokeHostFunctionOp` counterpart). When the contract has its code updated
+it is not considered created and thus constructor won't be called.
-This behavior has an important logical consequences: while the protocol guarantees that the constructor has been called if it was present in the _initial_ Wasm the contract has been created with, it does not guarantee that the constructor present in the _current_ Wasm of the contract has been called.
+This behavior has an important logical consequence: while the protocol
+guarantees that the constructor has been called if it was present in the
+_initial_ Wasm the contract has been created with, it does not guarantee that
+the constructor present in the _current_ Wasm of the contract has been called.
#### `create_contract_with_constructor` host function
-A new function, `create_contract_with_constructor`, with export name `e` in module `l` ('ledger' module) is added to the Soroban environment's exported interface.
+A new function, `create_contract_with_constructor`, with export name `e` in
+module `l` ('ledger' module) is added to the Soroban environment's exported
+interface.
The `env.json` in `rs-soroban-env` will be modified as so:
```json
{
- "export": "e",
- "name": "create_contract_with_constructor",
- "args": [
- {
- "name": "deployer",
- "type": "AddressObject"
- },
- {
- "name": "wasm_hash",
- "type": "BytesObject"
- },
- {
- "name": "salt",
- "type": "BytesObject"
- },
- {
- "name": "constructor_args",
- "type": "VecObject"
- }
- ],
- "return": "AddressObject",
- "docs": "Creates the contract instance on behalf of `deployer`. Created contract must be created from a Wasm that has a constructor. `deployer` must authorize this call via Soroban auth framework, i.e. this calls `deployer.require_auth` with respective arguments. `wasm_hash` must be a hash of the contract code that has already been uploaded on this network. `salt` is used to create a unique contract id. `constructor_args` are forwarded into created contract's constructor (`__constructor`) function. Returns the address of the created contract.",
- "min_supported_protocol": 22
+ "export": "e",
+ "name": "create_contract_with_constructor",
+ "args": [
+ {
+ "name": "deployer",
+ "type": "AddressObject"
+ },
+ {
+ "name": "wasm_hash",
+ "type": "BytesObject"
+ },
+ {
+ "name": "salt",
+ "type": "BytesObject"
+ },
+ {
+ "name": "constructor_args",
+ "type": "VecObject"
+ }
+ ],
+ "return": "AddressObject",
+ "docs": "Creates the contract instance on behalf of `deployer`. Created contract must be created from a Wasm that has a constructor. `deployer` must authorize this call via Soroban auth framework, i.e. this calls `deployer.require_auth` with respective arguments. `wasm_hash` must be a hash of the contract code that has already been uploaded on this network. `salt` is used to create a unique contract id. `constructor_args` are forwarded into created contract's constructor (`__constructor`) function. Returns the address of the created contract.",
+ "min_supported_protocol": 22
}
```
-The `deployer` (`AddressObject`), `wasm_hash` (`BytesObject`), and `salt` (`BytesObject`) semantics have exactly the same semantics as for the existing `create_contract` host function. `constructor_args` is a host vector of `Val`s containing the arguments to use in constructor.
+The `deployer` (`AddressObject`), `wasm_hash` (`BytesObject`), and `salt`
+(`BytesObject`) semantics have exactly the same semantics as for the existing
+`create_contract` host function. `constructor_args` is a host vector of `Val`s
+containing the arguments to use in constructor.
-The function creates a contract from Wasm and calls its constructor with the provided `constructor_args`. Contracts with no constructor may be created by this function as well, but no arguments have to be provided.
+The function creates a contract from Wasm and calls its constructor with the
+provided `constructor_args`. Contracts with no constructor may be created by
+this function as well, but no arguments have to be provided.
#### `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` host function
-The new variant of the XDR `HostFunction` for the `InvokeHostFunctionOp` (`HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` defined in XDR changes section) allows users to create contracts both with and without constructors via a single transaction. The function is only supported from protocol 22 onwards.
+The new variant of the XDR `HostFunction` for the `InvokeHostFunctionOp`
+(`HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` defined in XDR changes section) allows
+users to create contracts both with and without constructors via a single
+transaction. The function is only supported from protocol 22 onwards.
-Contracts can be created from any valid Wasm on-chain with this function, i.e. pre-22 Wasms are supported as well and are always treated as contracts with a default constructor (even if they happened to have `__constructor` function defined before).
+Contracts can be created from any valid Wasm on-chain with this function, i.e.
+pre-22 Wasms are supported as well and are always treated as contracts with a
+default constructor (even if they happened to have `__constructor` function
+defined before).
-`constructorArgs` argument of `CreateContractArgsV2` is a vector of arguments to pass to the constructor function.
+`constructorArgs` argument of `CreateContractArgsV2` is a vector of arguments
+to pass to the constructor function.
-The host function implementation is routed to `create_contract_with_constructor` host function.
+The host function implementation is routed to
+`create_contract_with_constructor` host function.
-The 'legacy' `HOST_FUNCTION_TYPE_CREATE_CONTRACT` XDR host function will be preserved in protocol 22 for the sake of backwards compatibility. It will work with the contracts that don't require providing the constructor arguments, i.e. the contracts that have a 'default' constructor (no explicit constructor defined) and contracts that have an explicitly defined constructor that accepts 0 input arguments.
+The 'legacy' `HOST_FUNCTION_TYPE_CREATE_CONTRACT` XDR host function will be
+preserved in protocol 22 for the sake of backwards compatibility. It will work
+with the contracts that don't require providing the constructor arguments, i.e.
+the contracts that have a 'default' constructor (no explicit constructor
+defined) and contracts that have an explicitly defined constructor that accepts
+0 input arguments.
#### Authorization support
-`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN` allows users to authorize both `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` (when called directly from `InvokeHostFunctionOp`), and `create_contract`/`create_contract_with_constructor` host functions starting from protocol 22.
+`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN` allows users to
+authorize both `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` (when called directly
+from `InvokeHostFunctionOp`), and
+`create_contract`/`create_contract_with_constructor` host functions starting
+from protocol 22.
-For `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` exactly the same `CreateContractArgsV2` structure as in the `HostFunction` definition has to be authorized.
+For `HOST_FUNCTION_TYPE_CREATE_CONTRACT_V2` exactly the same
+`CreateContractArgsV2` structure as in the `HostFunction` definition has to be
+authorized.
-For `create_contract` calls the respective XDR with 0 `constructorArgs` has to be authorized, and for `create_contract_with_constructor` `constructorArgs` has to be set to respective respective vector of arguments.
+For `create_contract` calls the respective XDR with 0 `constructorArgs` has to
+be authorized, and for `create_contract_with_constructor` `constructorArgs` has
+to be set to the respective vector of arguments.
-`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_HOST_FN` payload may still be used to authorize creation of contracts that with 0 constructor arguments (i.e. contracts with 'default' constructor and contracts with explicity 0-argument constructor).
+`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_HOST_FN` payload may still be
+used to authorize creation of contracts with 0 constructor arguments (i.e.
+contracts with 'default' constructor and contracts with explicitly 0-argument
+constructor).
##### Authorization context for custom accounts
-Custom accounts must be aware of the context that they are authorizing. Thus we extend `AuthorizationContext` data structure that is passed to the special `__check_auth` function by adding a new enum variant `CreateContractWithCtorHostFn` to it (defined using the `contracttype` syntax of Soroban SDK):
+Custom accounts must be aware of the context that they are authorizing. Thus we
+extend `AuthorizationContext` data structure that is passed to the special
+`__check_auth` function by adding a new enum variant
+`CreateContractWithCtorHostFn` to it (defined using the `contracttype` syntax
+of Soroban SDK):
```rust
#[contracttype]
@@ -225,13 +323,29 @@ enum AuthorizationContext {
}
```
-`CreateContractWithCtorHostFn` is used to represent authorizing creating a contract with non-zero constructor arguments. Contracts that have a constructor that takes no arguments are still represented by `CreateContractHostFn` variant, which improves the backwards compatibility with the existing custom accounts.
-
-Constructors with non-empty arguments can't be supported by the existing custom accounts that care about the context without potentially compromising their assumptions about the context, so if they try to strictly parse the new context variant they will fail. That will be the case for all the custom accounts built with the Soroban Rust SDK, as it fails in case if it can't parse the input argument. Note, that these custom accounts will still be able to authorize regular contract invocations and creating contracts that have the default or 0-argument constructor.
+`CreateContractWithCtorHostFn` is used to represent authorizing creating a
+contract with non-zero constructor arguments. Contracts that have a constructor
+that takes no arguments are still represented by `CreateContractHostFn`
+variant, which improves the backwards compatibility with the existing custom
+accounts.
+
+Constructors with non-empty arguments can't be supported by the existing custom
+accounts that care about the context without potentially compromising their
+assumptions about the context, so if they try to strictly parse the new context
+variant they will fail. That will be the case for all the custom accounts built
+with the Soroban Rust SDK, as it fails in case if it can't parse the input
+argument. Note, that these custom accounts will still be able to authorize
+regular contract invocations and creating contracts that have the default or
+0-argument constructor.
##### 'Deep' invoker authorization
-The specification above only refers to externally provided authorization. Soroban also has a capability of authorizing 'deep' (i.e. non-direct) cross-contract calls on behalf of the current contract (invoker). We add support for authorizing creating contracts with constructor by extending the `InvokerContractAuthEntry` enum with the new variant `CreateContractWithCtorHostFn`:
+The specification above only refers to externally provided authorization.
+Soroban also has a capability of authorizing 'deep' (i.e. non-direct)
+cross-contract calls on behalf of the current contract (invoker). We add
+support for authorizing creating contracts with constructor by extending the
+`InvokerContractAuthEntry` enum with the new variant
+`CreateContractWithCtorHostFn`:
```rust
#[contracttype]
@@ -242,50 +356,99 @@ enum InvokerContractAuthEntry {
}
```
-`CreateContractWithConstructorHostFnContext` is exactly the same struct as defined in the section above.
+`CreateContractWithConstructorHostFnContext` is exactly the same struct as
+defined in the section above.
-Since there are no signatures involved, we can provide more flexible authorization rules, specifically:
+Since there are no signatures involved, we can provide more flexible
+authorization rules, specifically:
-- Contracts that have a default constructor or a 0-argument constructor can be authorized either by authorizing the respective `CreateContractHostFn` entry, or by authorizing `CreateContractWithCtorHostFn` entry with 0 constructor arguments.
-- Contracts that have a constructor with more than 0 arguments must be authorized with the respective `CreateContractWithCtorHostFn` entry.
+- Contracts that have a default constructor or a 0-argument constructor can be
+ authorized either by authorizing the respective `CreateContractHostFn` entry,
+ or by authorizing `CreateContractWithCtorHostFn` entry with 0 constructor
+ arguments.
+- Contracts that have a constructor with more than 0 arguments must be
+ authorized with the respective `CreateContractWithCtorHostFn` entry.
-This way the existing contract may still authorize creation of the contracts with 'trivial' constructors, but must be updated in order to authorize contracts that have non-trivial constructors.
+This way the existing contract may still authorize creation of the contracts
+with 'trivial' constructors, but must be updated in order to authorize
+contracts that have non-trivial constructors.
## Design Rationale
### Constructor vs Factory
-Factory contracts can be used as an alternative to constructors and they are already supported by the protocol. However, factory contracts don't provide a guarantee that every contract instance has been initialized via a factory.
+Factory contracts can be used as an alternative to constructors and they are
+already supported by the protocol. However, factory contracts don't provide a
+guarantee that every contract instance has been initialized via a factory.
-In theory, we could standardize certain generic factory contract, build it into the protocol and provide initialization guarantees. While that approach would allow us to avoid adding new interfaces, it is much less straightforward to use - some (constructor-less) contracts would still be created via `create_contract`/`HOST_FUNCTION_TYPE_CREATE_CONTRACT`, while other(constructor-enabled) contracts would be created via `call`/`HOST_FUNCTION_TYPE_INVOKE_CONTRACT`.
+In theory, we could standardize certain generic factory contract, build it into
+the protocol and provide initialization guarantees. While that approach would
+allow us to avoid adding new interfaces, it is much less straightforward to
+use - some (constructor-less) contracts would still be created via
+`create_contract`/`HOST_FUNCTION_TYPE_CREATE_CONTRACT`, while
+other (constructor-enabled) contracts would be created via
+`call`/`HOST_FUNCTION_TYPE_INVOKE_CONTRACT`.
### Only a single constructor function is allowed
-Allowing to overload constructors would be pretty tricky as Wasm doesn't support overloading functions.
+Allowing to overload constructors would be pretty tricky as Wasm doesn't
+support overloading functions.
-If 'overload' behavior is necessary, it can be implemented in custom fashion via e.g. using a single `enum` argument with different variants containing different sets of arguments.
+If 'overload' behavior is necessary, it can be implemented in custom fashion
+via e.g. using a single `enum` argument with different variants containing
+different sets of arguments.
### Returning custom values from constructor is not allowed
-The constructor return value has no semantics from the perspective of this CAP, so in theory we could allow contracts to return arbitrary values from constructors and just discard them after calling the constructor. However, this might lead to incorrect developer expectations. For example, a developer might return a custom 'error-code' as integer, or a boolean flag and expect that it is respected by the Soroban host. This is an unlikely case, but since we don't want to make any potentially incorrect assumptions about behavior of the user-defined contracts, we explicitly disallow returning any non-error values besides `Val::VOID`.
-
-Note, that returning an error-type value (`Val::Error`) has the same consequences as returning any other custom value (i.e. the constructor is considered to have failed and contract creation is aborted), even though semantically these cases are different (the constructor failed vs constructor succeeded, but returned an unsupported value).
+The constructor return value has no semantics from the perspective of this CAP,
+so in theory we could allow contracts to return arbitrary values from
+constructors and just discard them after calling the constructor. However, this
+might lead to incorrect developer expectations. For example, a developer might
+return a custom 'error-code' as integer, or a boolean flag and expect that it
+is respected by the Soroban host. This is an unlikely case, but since we don't
+want to make any potentially incorrect assumptions about behavior of the
+user-defined contracts, we explicitly disallow returning any non-error values
+besides `Val::VOID`.
+
+Note, that returning an error-type value (`Val::Error`) has the same
+consequences as returning any other custom value (i.e. the constructor is
+considered to have failed and contract creation is aborted), even though
+semantically these cases are different (the constructor failed vs constructor
+succeeded, but returned an unsupported value).
### Constructor is not called when Wasm is updated
-Contract re-initialization is risky for a lot of cases, e.g. for contracts with complex ownership model (DAO), multisig smart wallets, or contracts that have a part of state assumed to be constant. For all such scenarios a malicious or non-malicious buggy interaction may break the important invariants, used to revert contract to no longer state or even simply break it for everyone.
+Contract re-initialization is risky for a lot of cases, e.g. for contracts with
+complex ownership model (DAO), multisig smart wallets, or contracts that have a
+part of state assumed to be constant. For all such scenarios a malicious or
+non-malicious buggy interaction may break the important invariants, used to
+revert contract to no longer state or even simply break it for everyone.
-While in some cases additional initialization may be required after Wasm has been updated, we deem the risks of allowing it to be higher than the benefits for such cases. Unlike initialization, atomicity is not as important, because the authorization priviliges don't change between the call to update the contract and the call to re-initialize it. Thus a programmatic, contract-specific solution should be feasible.
+While in some cases additional initialization may be required after Wasm has
+been updated, we deem the risks of allowing it to be higher than the benefits
+for such cases. Unlike initialization, atomicity is not as important, because
+the authorization privileges don't change between the call to update the
+contract and the call to re-initialize it. Thus a programmatic,
+contract-specific solution should be feasible.
## Security Concerns
-Constructors ensure atomic contract initialization that has to be authorized by the deployer of the contract (via `SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN` authorization payload). Thus, deployed contract can't have their initialization to be frontrun.
+Constructors ensure atomic contract initialization that has to be authorized by
+the deployer of the contract (via
+`SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN` authorization
+payload). Thus, deployed contracts can't have their initialization
+frontrun.
-Executing `__constructor` function from within host allows contracts to execute arbitrary logic while being called from a host function. However, the risk surface is not really increased compared to regular contract calls, as the caller acknowledges that constructor will be called, similarly to how the caller acknowledges that a different contract's method is going to be called.
+Executing `__constructor` function from within host allows contracts to execute
+arbitrary logic while being called from a host function. However, the risk
+surface is not really increased compared to regular contract calls, as the
+caller acknowledges that constructor will be called, similarly to how the
+caller acknowledges that a different contract's method is going to be called.
## Test Cases
-To be implemented. The tests should cover the newly introduced host function and the invariants described in the 'Semantics' section.
+To be implemented. The tests should cover the newly introduced host function
+and the invariants described in the 'Semantics' section.
## Implementation
diff --git a/core/cap-0059.md b/core/cap-0059.md
index 8d1d9e25d..69f8a562a 100644
--- a/core/cap-0059.md
+++ b/core/cap-0059.md
@@ -14,29 +14,45 @@ Protocol version: 22
```
## Simple Summary
-BLS12-381 is a pairing-friendly elliptic curve which enables a new suite of applications. This CAP proposes a set of new host functions providing access to BLS12-381 curve operations.
+
+BLS12-381 is a pairing-friendly elliptic curve which enables a new suite of
+applications. This CAP proposes a set of new host functions providing access to
+BLS12-381 curve operations.
## Working Group
As described in the preamble section.
## Motivation
-Pairing friendly elliptic curve operations are the backbone of many advanced Zero Knowledge (ZK) constructions, with a wide array of applications ranging from scaling to identity management.
-BLS12-381 is one of the most well-known and widely adopted pairing friendly curves, due to its efficiency and 128-bit security. However, the curve operations are inherently computationally intensive, making them prohibitive to be implemented in a smart contract. Providing an efficient implementation for a well-chosen set of curve and field operations natively inside the Soroban host is crucial for unlocking zero knowledge functionality in smart contracts
+Pairing friendly elliptic curve operations are the backbone of many advanced
+Zero Knowledge (ZK) constructions, with a wide array of applications ranging
+from scaling to identity management.
+
+BLS12-381 is one of the most well-known and widely adopted pairing friendly
+curves, due to its efficiency and 128-bit security. However, the curve
+operations are inherently computationally intensive, making them prohibitively
+expensive to implement in a smart contract. Providing an efficient implementation for a
+well-chosen set of curve and field operations natively inside the Soroban host
+is crucial for unlocking zero knowledge functionality in smart contracts.
### Goals Alignment
+
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects
- to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
-11 new host functions are proposed for performing field and curve operations on BLS12-381. Definitions and the semantics of each new host function, as well as the associated new metering parameters, will be introduced and explained.
+
+11 new host functions are proposed for performing field and curve operations on
+BLS12-381. Definitions and the semantics of each new host function, as well as
+the associated new metering parameters, will be introduced and explained.
## Specification
### New host functions
+
```
{
"export": "4",
@@ -368,296 +384,536 @@ index 52cc022..b8ba009 100644
#### Field and groups
-`fp` - field element in the base field. Encoding rule: big-endian encoding of the underlying unsigned 48-byte integer.
+`fp` - field element in the base field. Encoding rule: big-endian encoding of
+the underlying unsigned 48-byte integer.
-`fp2`- field element in the quadratic extension of the base prime field. Encoding rule: concatenation of the two encoded-components `c1` and `c0` i.e. `be_encode(c1) || be_encode(c0)`
+`fp2` - field element in the quadratic extension of the base prime field.
+Encoding rule: concatenation of the two encoded-components `c1` and `c0` i.e.
+`be_encode(c1) || be_encode(c0)`
-`fp12` - field element in the 12-degree prime extension field. This is the output from the pairing operation. `fp12` is only used as intermediary and encoding is not needed.
+`fp12` - field element in the 12-degree prime extension field. This is the
+output from the pairing operation. `fp12` is only used as intermediary and
+encoding is not needed.
-`fr` - scalar. A scalar has maximum length of 32 bytes. `fr` is represented with an `U256Val`.
+`fr` - scalar. A scalar has maximum length of 32 bytes. `fr` is represented
+with an `U256Val`.
-`G1` - group containing points over the base prime field that satisfy the curve equation, plus point at infinity. Encoding rule: concatenation of the two encoded-coordinates (uncompressed form), each being an `fp`, i.e. `be_encode(X) || be_encode(Y)`
+`G1` - group containing points over the base prime field that satisfy the curve
+equation, plus point at infinity. Encoding rule: concatenation of the two
+encoded-coordinates (uncompressed form), each being an `fp`, i.e.
+`be_encode(X) || be_encode(Y)`
-`G2` - group containing points over the quadratic extension of the base prime field that satisfy the curve equation, plus the point at infinity. Encoding rule: concatenation of the two encoded-coordinates (uncompressed form), each following `fp2` encoding rule, i.e. `be_encode(X_c1) || be_encode(X_c0) || be_encode(Y_c1) || be_encode(Y_c0)`
+`G2` - group containing points over the quadratic extension of the base prime
+field that satisfy the curve equation, plus the point at infinity. Encoding
+rule: concatenation of the two encoded-coordinates (uncompressed form), each
+following `fp2` encoding rule, i.e.
+`be_encode(X_c1) || be_encode(X_c0) || be_encode(Y_c1) || be_encode(Y_c0)`
#### New host functions introduced
-Below is a detailed specification of every new host functions introduced, the new costs involved, as well as the new error conditions introduced. It is important to note that these functions exist in the larger context of the runtime, which has a set of restrains already in place and described in previous CAPs. Specification here only covers new behaviors (including costs and errors) unique to these functions.
+Below is a detailed specification of every new host function introduced, the
+new costs involved, as well as the new error conditions introduced. It is
+important to note that these functions exist in the larger context of the
+runtime, which has a set of constraints already in place and described in
+previous CAPs. Specification here only covers new behaviors (including costs
+and errors) unique to these functions.
##### `bls12_381_g1_add`
-**Description**: perform point addition in G1.
+**Description**: perform point addition in G1.
+
+**Cost**: covers the cost of decoding (`Bls12381DecodeFp`) and validating
+(`Bls12381G1Validate`) G1 points, point addition (`Bls12381G1Add`), conversion
+from projective to affine space (`Bls12381G1ProjectiveToAffine`), and
+encoding the result to bytes `Bls12381EncodeFp`.
-**Cost**: covers the cost of decoding (`Bls12381DecodeFp`) and validating (`Bls12381G1Validate`) G1 points, point addition (`Bls12381G1Add`), conversion of from projective to affine space (`Bls12381G1ProjectiveToAffine`), and encoding the result to bytes `Bls12381EncodeFp`.
+**Error condition**: if the input bytes contained in the `BytesObject` do not
+decode into valid G1 points or do not conform to the specified encoding standard.
-**Error condition**: if the input bytes contained in the `BytesObject` do not decode into valid G1 points or do not conform the specified encoding standard.
- Bytes length is not equal to 96
-- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The compression flag (the most significant bit) is set.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
- Either input point does not belong on the G1 curve.
- Either input point does not belong to the correct subgroup.
##### `bls12_381_g1_mul`
-**Description** perform scalar multiplication in G1.
-**Cost**: includes decoding G1 point, converting `fr` from `U256` (`Bls12381FrFromU256`), point multiplication `Bls12381G1Mul`, converting the point from project to affine and encoding the result into bytes.
+**Description** perform scalar multiplication in G1.
+
+**Cost**: includes decoding G1 point, converting `fr` from `U256`
+(`Bls12381FrFromU256`), point multiplication `Bls12381G1Mul`, converting the
+point from projective to affine and encoding the result into bytes.
+
+**Error condition**: if the input `BytesObject` does not decode into a valid G1
+point or does not conform to the specified encoding standard.
-**Error condition**: if the input `BytesObject` does not decode into a valid G1 points or does not conform the specified encoding standard.
- Bytes length is not equal to 96
-- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The compression flag (the most significant bit) is set.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
- Either input point does not belong on the G1 curve.
- Either input point does not belong to the correct subgroup.
##### `bls12_381_g1_msm`
-**Description** perform multi-scalar-multiplication (MSM) in G1.
+**Description** perform multi-scalar-multiplication (MSM) in G1.
-**Cost**: includes decoding of the G1 vector, the fr vector, and the MSM operation `Bls12381G1Msm`, and encoding of the resulting G1 point.
+**Cost**: includes decoding of the G1 vector, the fr vector, and the MSM
+operation `Bls12381G1Msm`, and encoding of the resulting G1 point.
-**Error condition**:
-1. if the two vectors have different lengths
+**Error condition**:
+
+1. if the two vectors have different lengths
2. if the length of either vector is zero.
-3. if any point in the G1 points vector does not does not decode into a valid G1 points or does not conform the specified encoding standard.
+3. if any point in the G1 points vector does not decode into a valid
+   G1 point or does not conform to the specified encoding standard.
+
- Bytes length is not equal to 96
- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
- Either input point does not belong on the G1 curve.
- Either input point does not belong to the correct subgroup.
##### `bls12_381_map_fp_to_g1`
-**Description**: maps an `fp` to a point in `G1`.
+**Description**: maps an `fp` to a point in `G1`.
+
+**Cost**: includes decoding of the G1 point, the mapping cost
+`Bls12381MapFpToG1`, and encoding of the resulting G1 point.
-**Cost**: includes decoding of the G1 point, the mapping cost `Bls12381MapFpToG1`, and encoding of the resulting G1 point.
+**Error condition**: if the input `BytesObject` does not serialize to a valid
+`fp`
-**Error condition**: if the input `BytesObject` does not serialize to a valid `fp`
- Bytes length is not equal to 48
##### `bls12_381_hash_to_g1`
-**Description**: hashes a message (a sequence of bytes) into a point in `G1`.
-following the specification in [Hashing to Elliptic Curves](https://datatracker.ietf.org/doc/html/rfc9380) under ciphersuite 'BLS12381G1_XMD:SHA-256_SSWU_RO_', using
+
+**Description**: hashes a message (a sequence of bytes) into a point in `G1`.
+following the specification in
+[Hashing to Elliptic Curves](https://datatracker.ietf.org/doc/html/rfc9380)
+under ciphersuite `BLS12381G1_XMD:SHA-256_SSWU_RO_`, using
+
- `expand_msg_xmd` with sha256 for uniformly random byte string generation
-- `hash_to_curve` to encode the byte string to points in G1, using "simplified SWU for AB == 0" as the underneath mapping method
+- `hash_to_curve` to encode the byte string to points in G1, using "simplified
+ SWU for AB == 0" as the underneath mapping method
-`dst` is the domain separation tag that will be concatenated with the `msg` during hashing, it is intended to keep hashing inputs of different applications separate. It is required `0 < len(dst_bytes) < 256`. DST **must** be chosen with care to avoid compromising the application's security properties. Refer to section 3.1 in the RFC on requirements of DST.
+`dst` is the domain separation tag that will be concatenated with the `msg`
+during hashing, it is intended to keep hashing inputs of different applications
+separate. It is required `0 < len(dst_bytes) < 256`. DST **must** be chosen
+with care to avoid compromising the application's security properties. Refer to
+section 3.1 in the RFC on requirements of DST.
-**Cost**: covered by `Bls12381HashToG1`.
+**Cost**: covered by `Bls12381HashToG1`.
**Error condition**: if the byte length of `dst` is 0 or greater than 255.
##### `bls12_381_g2_add`
-**Description**: perform point addition in G2.
+**Description**: perform point addition in G2.
-**Cost**: covers the cost of decoding (`Bls12381DecodeFp`) and validating (`Bls12381G2Validate`) G2 points, point addition (`Bls12381G2Add`), conversion of from projective to affine space (`Bls12381G2ProjectiveToAffine`), and encoding the result to bytes `Bls12381EncodeFp`.
+**Cost**: covers the cost of decoding (`Bls12381DecodeFp`) and validating
+(`Bls12381G2Validate`) G2 points, point addition (`Bls12381G2Add`), conversion
+from projective to affine space (`Bls12381G2ProjectiveToAffine`), and
+encoding the result to bytes `Bls12381EncodeFp`.
+
+**Error condition**: if the input bytes contained in the `BytesObject` do not
+decode into valid G2 points or do not conform to the specified encoding standard.
-**Error condition**: if the input bytes contained in the `BytesObject` do not decode into valid G2 points or do not conform the specified encoding standard.
- Bytes length is not equal to 192
- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
- Either input point does not belong on the G2 curve.
- Either input point does not belong to the correct subgroup.
##### `bls12_381_g2_mul`
-**Description** perform scalar multiplication in G2.
-**Cost**: includes decoding G2 point, converting `fr` from `U256` (`Bls12381FrFromU256`), point multiplication `Bls12381G2Mul`, converting the point from project to affine and encoding the result into bytes.
+**Description**: perform scalar multiplication in G2.
+
+**Cost**: includes decoding G2 point, converting `fr` from `U256`
+(`Bls12381FrFromU256`), point multiplication `Bls12381G2Mul`, converting the
+point from projective to affine and encoding the result into bytes.
+
+**Error condition**: if the input `BytesObject` does not decode into a valid G2
+point or does not conform to the specified encoding standard.
-**Error condition**: if the input `BytesObject` does not decode into a valid G2 points or does not conform the specified encoding standard.
- Bytes length is not equal to 192
- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
- Either input point does not belong on the G2 curve.
- Either input point does not belong to the correct subgroup.
##### `bls12_381_g2_msm`
-**Description** perform multi-scalar-multiplication (MSM) in G2.
+**Description**: perform multi-scalar-multiplication (MSM) in G2.
+
+**Cost**: includes decoding of the G2 vector, the fr vector, and the MSM
+operation `Bls12381G2Msm`, and encoding of the resulting G2 point.
-**Cost**: includes decoding of the G2 vector, the fr vector, and the MSM operation `Bls12381G2Msm`, and encoding of the resulting G2 point.
+**Error condition**:
-**Error condition**:
-1. if the two vectors have different lengths
+1. if the two vectors have different lengths
2. if the length of either vector is zero.
-3. if any point in the G2 points vector does not does not decode into a valid G2 points or does not conform the specified encoding standard.
+3. if any point in the G2 points vector does not decode into a valid G2 point
+   or does not conform to the specified encoding standard.
+
- Bytes length is not equal to 192
- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
- Either input point does not belong on the G2 curve.
- Either input point does not belong to the correct subgroup.
##### `bls12_381_map_fp2_to_g2`
-**Description**: maps an `fp2` to a point in `G2`.
+**Description**: maps an `fp2` to a point in `G2`.
-**Cost**: includes decoding of the G2 point, the mapping cost `Bls12381MapFpToG2`, and encoding of the resulting G2 point.
+**Cost**: includes decoding of the `fp2` element, the mapping cost
+`Bls12381MapFp2ToG2`, and encoding of the resulting G2 point.
+
+**Error condition**: if the input `BytesObject` does not serialize to a valid
+`fp2`
-**Error condition**: if the input `BytesObject` does not serialize to a valid `fp2`
- Bytes length is not equal to 96
##### `bls12_381_hash_to_g2`
-**Description**: hashes a message (a sequence of bytes) into a point in `G2`.
-following the specification in [Hashing to Elliptic Curves](https://datatracker.ietf.org/doc/html/rfc9380) under ciphersuite 'BLS12381G2_XMD:SHA-256_SSWU_RO_', using
+
+**Description**: hashes a message (a sequence of bytes) into a point in `G2`,
+following the specification in
+[Hashing to Elliptic Curves](https://datatracker.ietf.org/doc/html/rfc9380)
+under ciphersuite `BLS12381G2_XMD:SHA-256_SSWU_RO_`, using
+
- `expand_msg_xmd` with sha256 for uniformly random byte string generation
-- `hash_to_curve` to encode the byte string to points in G1, using "simplified SWU for AB == 0" as the underneath mapping method
+- `hash_to_curve` to encode the byte string to points in G2, using "simplified
+  SWU for AB == 0" as the underlying mapping method
-`dst` is the domain separation tag that will be concatenated with the `msg` during hashing, it is intended to keep hashing inputs of different applications separate. It is required `0 < len(dst_bytes) < 256`. DST **must** be chosen with care to avoid compromising the application's security properties. Refer to section 3.1 in the RFC on requirements of DST.
+`dst` is the domain separation tag that will be concatenated with the `msg`
+during hashing, it is intended to keep hashing inputs of different applications
+separate. It is required `0 < len(dst_bytes) < 256`. DST **must** be chosen
+with care to avoid compromising the application's security properties. Refer to
+section 3.1 in the RFC on requirements of DST.
-**Cost**: covered by `Bls12381HashToG1`.
+**Cost**: covered by `Bls12381HashToG2`.
**Error condition**: if the byte length of `dst` is 0 or greater than 255.
##### `bls12_381_multi_pairing_check`
-**Description**: performs pairing operation on a vector of `G1` and a vector of `G2` points, returns `true` if the result equals `1_fp12`, otherwise returns `false`.
-**Cost**: includes deserialization of the point vectors (in G1 and G2 respectively), cost of performing the pairing operation `Bls12381Pairing`.
+**Description**: performs pairing operation on a vector of `G1` and a vector of
+`G2` points, returns `true` if the result equals `1_fp12`, otherwise returns
+`false`.
+
+**Cost**: includes deserialization of the point vectors (in G1 and G2
+respectively), cost of performing the pairing operation `Bls12381Pairing`.
+
+**Error conditions**:
-**Error conditions**:
1. two input vectors have different length
2. either input vector has zero length
-3. any element in the G1 vector does not decode into a valid G1 points or does not conform the specified encoding standard.
+3. any element in the G1 vector does not decode into a valid G1 point or does
+   not conform to the specified encoding standard.
+
- Bytes length is not equal to 96
- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
- Either input point does not belong on the G1 curve.
- Either input point does not belong to the correct subgroup.
-4. any element in the G2 vector does not decode into a valid G2 points or does not conform the specified encoding standard.
+
+4. any element in the G2 vector does not decode into a valid G2 point or does
+   not conform to the specified encoding standard.
+
- Bytes length is not equal to 192
- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
- Either input point does not belong on the G2 curve.
- Either input point does not belong to the correct subgroup.
##### `bls12_381_fr_add`
-**Description**: performs addition `(lhs + rhs) mod r` between two `fr`.
-**Cost**: conversion of fr from U256 (`Bls12381FrFromU256`), scalar addition `Bls12381FrAddSub`.
+**Description**: performs addition `(lhs + rhs) mod r` between two `fr`.
+
+**Cost**: conversion of fr from U256 (`Bls12381FrFromU256`), scalar addition
+`Bls12381FrAddSub`.
**Error condition**: None
##### `bls12_381_fr_sub`
-**Description**: performs subtraction `(lhs - rhs) mod r` between two `fr`.
+
+**Description**: performs subtraction `(lhs - rhs) mod r` between two `fr`.
**Cost**: conversion and scalar subtraction `Bls12381FrAddSub`.
**Error condition**: None
##### `bls12_381_fr_mul`
-**Description**: performs multiplication `(lhs * rhs) mod r` between two `fr`.
+
+**Description**: performs multiplication `(lhs * rhs) mod r` between two `fr`.
**Cost**: conversion and scalar multiplication `Bls12381FrMul`.
**Error condition**: None
##### `bls12_381_fr_pow`
-**Description**: performs exponentiation `lhs.exp(rhs) mod r` between `fr` and an u64 exponent.
+
+**Description**: performs exponentiation `lhs.exp(rhs) mod r` between `fr` and
+a u64 exponent.
**Cost**: conversion and scalar exponentiation `Bls12381FrPow`.
**Error condition**: None
##### `bls12_381_fr_inv`
-**Description**: performs inversion on `fr`.
+
+**Description**: performs inversion on `fr`.
**Cost**: conversion and scalar inversion `Bls12381FrInv`.
**Error condition**: if the provided input `fr` is zero.
#### New metering `CostType`s introduced
-- `Bls12381EncodeFp` - Cost of encoding a BLS12-381 Fp (base field element). Encoding includes the necessary conversion from the internal representation into integer form (e.g. Montgomery reduction), and serialization into bytes. Type: constant.
-- `Bls12381DecodeFp` - Cost of decoding a BLS12-381 Fp (base field element). Decoding includes deserialization from bytes into integer, and the necessary conversion from the integer form to the internal representation (e.g. Montgomery multiplication). Type: constant.
-- `Bls12381G1Validate` - Cost of validating a G1 point lies on the curve and belongs to the correct subgroup. Type: constant.
-- `Bls12381G2Validate` - Cost of validating a G2 point lies on the curve and belongs to the correct subgroup. Type: constant.
-- `Bls12381G1ProjectiveToAffine` - Cost of converting a BLS12-381 G1 point from projective to affine coordinates. Type: constant.
-- `Bls12381G2ProjectiveToAffine` - Cost of converting a BLS12-381 G2 point from projective to affine coordinates. Type: constant.
-- `Bls12381G1Add` - Cost of performing BLS12-381 G1 point addition. Type: constant.
-- `Bls12381G1Mul` - Cost of performing BLS12-381 G1 scalar multiplication. Type: constant.
-- `Bls12381G1Msm` - Cost of performing BLS12-381 G1 multi-scalar multiplication (MSM). Type: linear w.r.t the length of the input vectors.
-- `Bls12381MapFpToG1` - Cost of mapping a BLS12-381 Fp field element to a G1 point. Type: constant.
-- `Bls12381HashToG1` - Cost of hashing a message (a byte array) to a BLS12-381 G1 point. Type: linear w.r.t. the byte length.
-- `Bls12381G2Add` - Cost of performing BLS12-381 G2 point addition. Type: constant.
-- `Bls12381G2Mul` - Cost of performing BLS12-381 G2 scalar multiplication. Type: constant.
-- `Bls12381G2Msm` - Cost of performing BLS12-381 G2 multi-scalar multiplication (MSM). Type: linear w.r.t the length of the input vectors.
-- `Bls12381MapFp2ToG2` - Cost of mapping a BLS12-381 Fp2 field element to a G2 point. Type: constant.
-- `Bls12381HashToG2` - Cost of hashing a message (a byte array) to a BLS12-381 G2 point. Type: linear w.r.t. the byte length.
-- `Bls12381Pairing` - Cost of performing BLS12-381 pairing operation. Type: linear w.r.t to the length of the input vectors.
-- `Bls12381FrFromU256` - Cost of converting a BLS12-381 scalar element from U256. This includes necessary conversion from the integer form to the internal representation (e.g. Montgomery multiplication). Type: constant.
-- `Bls12381FrToU256` - Cost of converting a BLS12-381 scalar element to U256. This includes the necessary conversion from the internal representation into integer form (e.g. Montgomery reduction). Type: constant.
-- `Bls12381FrAddSub` - Cost of performing BLS12-381 scalar element addition/subtraction. Type: constant.
-- `Bls12381FrMul` - Cost of performing BLS12-381 scalar element multiplication. Type: constant.
-- `Bls12381FrPow` - Cost of performing BLS12-381 scalar element exponentiation. Type: linear w.r.t number of bits in the u64 exponent excluding leading zeros.
-- `Bls12381FrInv` - Cost of performing BLS12-381 scalar element inversion. Type: constant.
+
+- `Bls12381EncodeFp` - Cost of encoding a BLS12-381 Fp (base field element).
+ Encoding includes the necessary conversion from the internal representation
+ into integer form (e.g. Montgomery reduction), and serialization into bytes.
+ Type: constant.
+- `Bls12381DecodeFp` - Cost of decoding a BLS12-381 Fp (base field element).
+ Decoding includes deserialization from bytes into integer, and the necessary
+ conversion from the integer form to the internal representation (e.g.
+ Montgomery multiplication). Type: constant.
+- `Bls12381G1Validate` - Cost of validating a G1 point lies on the curve and
+ belongs to the correct subgroup. Type: constant.
+- `Bls12381G2Validate` - Cost of validating a G2 point lies on the curve and
+ belongs to the correct subgroup. Type: constant.
+- `Bls12381G1ProjectiveToAffine` - Cost of converting a BLS12-381 G1 point from
+ projective to affine coordinates. Type: constant.
+- `Bls12381G2ProjectiveToAffine` - Cost of converting a BLS12-381 G2 point from
+ projective to affine coordinates. Type: constant.
+- `Bls12381G1Add` - Cost of performing BLS12-381 G1 point addition. Type:
+ constant.
+- `Bls12381G1Mul` - Cost of performing BLS12-381 G1 scalar multiplication.
+ Type: constant.
+- `Bls12381G1Msm` - Cost of performing BLS12-381 G1 multi-scalar multiplication
+ (MSM). Type: linear w.r.t the length of the input vectors.
+- `Bls12381MapFpToG1` - Cost of mapping a BLS12-381 Fp field element to a G1
+ point. Type: constant.
+- `Bls12381HashToG1` - Cost of hashing a message (a byte array) to a BLS12-381
+ G1 point. Type: linear w.r.t. the byte length.
+- `Bls12381G2Add` - Cost of performing BLS12-381 G2 point addition. Type:
+ constant.
+- `Bls12381G2Mul` - Cost of performing BLS12-381 G2 scalar multiplication.
+ Type: constant.
+- `Bls12381G2Msm` - Cost of performing BLS12-381 G2 multi-scalar multiplication
+ (MSM). Type: linear w.r.t the length of the input vectors.
+- `Bls12381MapFp2ToG2` - Cost of mapping a BLS12-381 Fp2 field element to a G2
+ point. Type: constant.
+- `Bls12381HashToG2` - Cost of hashing a message (a byte array) to a BLS12-381
+ G2 point. Type: linear w.r.t. the byte length.
+- `Bls12381Pairing` - Cost of performing BLS12-381 pairing operation. Type:
+  linear w.r.t. the length of the input vectors.
+- `Bls12381FrFromU256` - Cost of converting a BLS12-381 scalar element from
+ U256. This includes necessary conversion from the integer form to the
+ internal representation (e.g. Montgomery multiplication). Type: constant.
+- `Bls12381FrToU256` - Cost of converting a BLS12-381 scalar element to U256.
+ This includes the necessary conversion from the internal representation into
+ integer form (e.g. Montgomery reduction). Type: constant.
+- `Bls12381FrAddSub` - Cost of performing BLS12-381 scalar element
+ addition/subtraction. Type: constant.
+- `Bls12381FrMul` - Cost of performing BLS12-381 scalar element multiplication.
+ Type: constant.
+- `Bls12381FrPow` - Cost of performing BLS12-381 scalar element exponentiation.
+ Type: linear w.r.t number of bits in the u64 exponent excluding leading
+ zeros.
+- `Bls12381FrInv` - Cost of performing BLS12-381 scalar element inversion.
+ Type: constant.
#### Tweak to the budget charging formula
-When a cost model is evaluated multiple times with the same input, an internal optimization is bulk charging. Consider the following linear model `Y = a + scaled_b * X`, where `X` and `Y` are model input and output, `a` is a regular constant, `scaled_b` is a scaled constant with extra bits of precision. The current sequence of computation 1. compute `m = scaled_b * X` 2. unscale the result `unscale(m)`, 3 add the unscaled output to `a`.
-In the current protocol, applying bulk model charging directly multiplies the iteration count `I` to the evaluated model output i.e. `I * Y`. In the new protocol, the iteration count is applied to individual term before computing the sum, i.e. `I * a + I * scaled_b * X`, this simple tweak helps increasing the budget bulk charge accuracy, by preserving precision during the intermediate computation of the linear term.
+When a cost model is evaluated multiple times with the same input, an internal
+optimization is bulk charging. Consider the following linear model
+`Y = a + scaled_b * X`, where `X` and `Y` are model input and output, `a` is a
+regular constant, `scaled_b` is a scaled constant with extra bits of precision.
+The current sequence of computation is: 1. compute `m = scaled_b * X`, 2.
+unscale the result `unscale(m)`, 3. add the unscaled output to `a`.
+
+In the current protocol, applying bulk model charging directly multiplies the
+iteration count `I` to the evaluated model output i.e. `I * Y`. In the new
+protocol, the iteration count is applied to individual term before computing
+the sum, i.e. `I * a + I * scaled_b * X`, this simple tweak helps increasing
+the budget bulk charge accuracy, by preserving precision during the
+intermediate computation of the linear term.
## Design Rationale
### Function list choice
-The list of host functions follows closely what has been proposed in [eip-2537](https://eips.ethereum.org/EIPS/eip-2537), and has been adopted by Ethereum, with many proven ZKP use cases.
-The proposed list includes two additional "hash to curve" functions which performs hashing arbitrary message to curve points: `bls12_381_hash_to_g1` and `bls12_381_hash_to_g2`, which follow the IETF hash-to-curve standard as specified in [rfc9380](https://datatracker.ietf.org/doc/rfc9380/).
+The list of host functions follows closely what has been proposed in
+[eip-2537](https://eips.ethereum.org/EIPS/eip-2537), and has been adopted by
+Ethereum, with many proven ZKP use cases.
+
+The proposed list includes two additional "hash to curve" functions which
+perform hashing of arbitrary messages to curve points: `bls12_381_hash_to_g1` and
+`bls12_381_hash_to_g2`, which follow the IETF hash-to-curve standard as
+specified in [rfc9380](https://datatracker.ietf.org/doc/rfc9380/).
#### `U256Val` for scalar
-All field and group elements mentioned in the host functions are represented as `BytesObject`, with encoding rule specified in [fields and groups](#field-and-groups), except for the scalar element. Since the scalar can be up to 32-bytes, with same semantics as an unsigned integer, using `U256Val` is the natural choice. Extracting bytes in the correct format can be tricky and error prone, depending on the underlying implementation (for example it is common for field elements implementations to store the montgomery backend number for better computation efficiency, extracting the bytes require performing the reduction first). Using `U256Val` also takes advantage of the internal small value optimization which reduces storage space for small numbers, see [value type repertoire](cap-0046-01.md/#rationale-for-value-and-object-type-repertoires).
+
+All field and group elements mentioned in the host functions are represented as
+`BytesObject`, with encoding rule specified in
+[fields and groups](#field-and-groups), except for the scalar element. Since
+the scalar can be up to 32-bytes, with same semantics as an unsigned integer,
+using `U256Val` is the natural choice. Extracting bytes in the correct format
+can be tricky and error prone, depending on the underlying implementation (for
+example it is common for field elements implementations to store the montgomery
+backend number for better computation efficiency, extracting the bytes require
+performing the reduction first). Using `U256Val` also takes advantage of the
+internal small value optimization which reduces storage space for small
+numbers, see
+[value type repertoire](cap-0046-01.md/#rationale-for-value-and-object-type-repertoires).
#### No plain "pairing" function
-The multi-pairing-check function is provided in instead of a direct pairing function. This is also inline with the choice of eip-2537.
-Most applications requiring pairing also performs comparison of pairing results, such as BLS signature verification, or verification of zk-SNARKs. The pairing comparison `e(a, b) == e(c, d)` can be converted to a multi-pairing evaluation of `e(a, b) * e(-c, d) == 1`. The main benefit is cost saving, since final exponentiation (the most expensive part of pairing) only needs to be performed on the final result. Not providing the plain pairing function also simplifies the host interface, since it eliminates the need to serialize and return the pairing result in `fp12`, which is 576 bytes long, and prevents contract from unknowingly opting into the expensive pattern (in terms of both compute and storage).
+The multi-pairing-check function is provided instead of a direct pairing
+function. This is also in line with the choice of eip-2537.
-### Choice of the cost types
-The new metering [cost types](#new-metering-costtypes-introduced) broadly follow the selection criteria outlined in [cap-0046-10](cap-0046-10.md). The set is designed to efficiently represent distinct, non-overlapping units of computation, ensuring optimal separation of work components.
+Most applications requiring pairing also perform comparison of pairing
+results, such as BLS signature verification, or verification of zk-SNARKs. The
+pairing comparison `e(a, b) == e(c, d)` can be converted to a multi-pairing
+evaluation of `e(a, b) * e(-c, d) == 1`. The main benefit is cost saving, since
+final exponentiation (the most expensive part of pairing) only needs to be
+performed on the final result. Not providing the plain pairing function also
+simplifies the host interface, since it eliminates the need to serialize and
+return the pairing result in `fp12`, which is 576 bytes long, and prevents
+contract from unknowingly opting into the expensive pattern (in terms of both
+compute and storage).
-#### Encode/Decode
-The only two cost types representing encoding/decoding are of the base field element `Bls12381EncodeFp` and `Bls12381DecodeFp`, since all field and group elements can be composed of the base elements. (En)Decoding `G1` is (en)decoding two `fp` separately, same for `fp2`. `G2` contains two `fp2`, that are (en)decoded separately. `fr` is also represented as a field element of a less order, thus (en)decoding work is strictly less than on an `fp` (and thus okay to use `fp` as an moderate overestimation for it).
+### Choice of the cost types
-Our encoding/decoding follows the original [spec](https://github.com/zcash/librustzcash/blob/6e0364cd42a2b3d2b958a54771ef51a8db79dd29/pairing/src/bls12_381/README.md).
+The new metering [cost types](#new-metering-costtypes-introduced) broadly
+follow the selection criteria outlined in [cap-0046-10](cap-0046-10.md). The
+set is designed to efficiently represent distinct, non-overlapping units of
+computation, ensuring optimal separation of work components.
-#### Contract panic on error
-While not particular to, or introduced by this proposal, it is important to point out that if any error occurs whiling calling a host function, the host will trap the guest VM, which terminates the execution of the guest contract. A host error can span a wide range of reasons, such as running out-of-budget or trying to access an invalid object reference, most of which aren't recoverable for the contract (the [`try_call`](cap-0046-03.md/#call-host-functions-mod-d) function allows calling a contract function in fallible ways, i.e. return `Void` on non-internal errors). This design is to ensure the integrity of the execution runtime (see [cap-0046-01](cap-0046-01.md/#interface) and [cap-0046-03](cap-0046-03.md/#error-handling) for details).
+#### Encode/Decode
-Therefore the new error conditions introduced in this proposal (see [New host functions introduced](#new-host-functions-introduced)) intend to make sure these are actual errors (not a legitimate `false` condition), or they are important for safeguarding the host's runtime integrity. (The [pairing function](#bls12_381_multi_pairing_check) for example does *not* error on a failed pairing check, it returns `true`/`false`).
+The only two cost types representing encoding/decoding are of the base field
+element `Bls12381EncodeFp` and `Bls12381DecodeFp`, since all field and group
+elements can be composed of the base elements. (En)Decoding `G1` is
+(en)decoding two `fp` separately, same for `fp2`. `G2` contains two `fp2`, that
+are (en)decoded separately. `fr` is also represented as a field element of a
+less order, thus (en)decoding work is strictly less than on an `fp` (and thus
+okay to use `fp` as a moderate overestimation for it).
-There are only two types of new errors introduced: 1. Decoding errors and 2. input vector length errors.
+Our encoding/decoding follows the original
+[spec](https://github.com/zcash/librustzcash/blob/6e0364cd42a2b3d2b958a54771ef51a8db79dd29/pairing/src/bls12_381/README.md).
-On the former, it is a crucial step and a standard practice to ensure all cryptographic inputs are properly formed and conforms to the specified standards. A malformed input is likely to be a mistake in the contract, or a malicious attempt at the underlying runtime (e.g. a DOS attempt to make runtime perform work without charging). (Note also the underlying library implementation most likely also performs all the checks and errors if they fail. We are just front loading them on the host as an extra safety measure, and to provide a better error code/message.) If there is an legitimate use case for badly-encoded input, those checks should also be easy to implement on the sdk/guest side.
+#### Contract panic on error
-On the latter, mismatched vector size, or a zero-sized vector input is most likely mistake in contract logic, and one that is easy to mitigate by including proper sdk/guest side checks.
+While not particular to, or introduced by this proposal, it is important to
+point out that if any error occurs while calling a host function, the host
+will trap the guest VM, which terminates the execution of the guest contract. A
+host error can span a wide range of reasons, such as running out-of-budget or
+trying to access an invalid object reference, most of which aren't recoverable
+for the contract (the [`try_call`](cap-0046-03.md/#call-host-functions-mod-d)
+function allows calling a contract function in fallible ways, i.e. return
+`Void` on non-internal errors). This design is to ensure the integrity of the
+execution runtime (see [cap-0046-01](cap-0046-01.md/#interface) and
+[cap-0046-03](cap-0046-03.md/#error-handling) for details).
+
+Therefore the new error conditions introduced in this proposal (see
+[New host functions introduced](#new-host-functions-introduced)) intend to make
+sure these are actual errors (not a legitimate `false` condition), or they are
+important for safeguarding the host's runtime integrity. (The
+[pairing function](#bls12_381_multi_pairing_check) for example does _not_ error
+on a failed pairing check, it returns `true`/`false`).
+
+There are only two types of new errors introduced: 1. Decoding errors and 2.
+input vector length errors.
+
+On the former, it is a crucial step and a standard practice to ensure all
+cryptographic inputs are properly formed and conform to the specified
+standards. A malformed input is likely to be a mistake in the contract, or a
+malicious attempt at the underlying runtime (e.g. a DOS attempt to make runtime
+perform work without charging). (Note also the underlying library
+implementation most likely also performs all the checks and errors if they
+fail. We are just front loading them on the host as an extra safety measure,
+and to provide a better error code/message.) If there is a legitimate use case
+for badly-encoded input, those checks should also be easy to implement on the
+sdk/guest side.
+
+On the latter, mismatched vector size, or a zero-sized vector input is most
+likely a mistake in contract logic, one that is easy to mitigate by including
+proper sdk/guest side checks.
## Protocol Upgrade Transition
-The proposed host functions will become available protocol 22, i.e. with `"min_supported_protocol": 22` in the interface definition. For `protocol_version <= 21`, attempting to import any of these function definitions in the WASM will lead to a linking error during Vm instantiation time.
+
+The proposed host functions will become available protocol 22, i.e. with
+`"min_supported_protocol": 22` in the interface definition. For
+`protocol_version <= 21`, attempting to import any of these function
+definitions in the WASM will lead to a linking error during Vm instantiation
+time.
### Backwards Incompatibilities
+
This CAP does not introduce any backward incompatibilities.
### Resource Utilization
-The performance impact of the new host functions have been captured by the new `CostType` described above. The cpu and memory consumption need to be calibrated carefully on each new `CostType` to ensure that the cost of running BLS host functions are metered properly and subject to the network limits. Final calibration numbers are TBD.
+
+The performance impact of the new host functions have been captured by the new
+`CostType` described above. The cpu and memory consumption need to be
+calibrated carefully on each new `CostType` to ensure that the cost of running
+BLS host functions are metered properly and subject to the network limits.
+Final calibration numbers are TBD.
## Security Concerns
+
The main security concerns include
-- Logic correctness. The proposed set of functions cover a wide range of cryptographic operations, which rely on correctness of 3rd party implementations. Incorrect implementation or failure to cover certain corner case potentially be exploitable vulnerabilities.
-- Denial of service. Since the proposed operations are computationally intensive, failure to properly calibrate any part, or to properly account for an extra-expensive path, could lead to the actual computation time significantly exceeding the metered costs, thus potentially lead to denial of service.
+
+- Logic correctness. The proposed set of functions cover a wide range of
+ cryptographic operations, which rely on correctness of 3rd party
+ implementations. Incorrect implementation or failure to cover certain corner
+ case potentially be exploitable vulnerabilities.
+- Denial of service. Since the proposed operations are computationally
+ intensive, failure to properly calibrate any part, or to properly account for
+ an extra-expensive path, could lead to the actual computation time
+ significantly exceeding the metered costs, thus potentially lead to denial of
+ service.
## Test Cases
-Besides thorough unit and integration tests that are standard to any protocol implementation, in addition, we will be adding external test vectors as reference, these include:
-- [Ethereum BLS12-381 test vectors](https://github.com/ethereum/bls12-381-tests). This shall cover the 11 major host functions that overlap with Ethereum's precompile (eip-2537).
-- Hashing to Elliptic Curves RFC provided test vectors for [BLS12-381 G1](https://datatracker.ietf.org/doc/html/rfc9380#name-bls12-381-g1-2) and [BLS12-381 G2](https://datatracker.ietf.org/doc/html/rfc9380#name-bls12-381-g2-2). These shall cover the two hash-to-curve functions we added.
+
+Besides thorough unit and integration tests that are standard to any protocol
+implementation, in addition, we will be adding external test vectors as
+reference, these include:
+
+- [Ethereum BLS12-381 test vectors](https://github.com/ethereum/bls12-381-tests).
+ This shall cover the 11 major host functions that overlap with Ethereum's
+ precompile (eip-2537).
+- Hashing to Elliptic Curves RFC provided test vectors for
+ [BLS12-381 G1](https://datatracker.ietf.org/doc/html/rfc9380#name-bls12-381-g1-2)
+ and
+ [BLS12-381 G2](https://datatracker.ietf.org/doc/html/rfc9380#name-bls12-381-g2-2).
+ These shall cover the two hash-to-curve functions we added.
## Implementation
-An initial prototype of BLS12-381 host functions, SDK, and an example custom account contract with BLS signature has been implemented:
-- env-prototype: https://github.com/jayz22/rs-soroban-env/tree/bls12-318-field-arithmetic
-- sdk-prototype (linking to the env prototype above): https://github.com/jayz22/rs-soroban-sdk/tree/bls12-318-field-arithmetic
-- BLS-signature custom account example: https://github.com/jayz22/soroban-examples/tree/bls-signature/bls_signature
+An initial prototype of BLS12-381 host functions, SDK, and an example custom
+account contract with BLS signature has been implemented:
+
+- env-prototype:
+ https://github.com/jayz22/rs-soroban-env/tree/bls12-318-field-arithmetic
+- sdk-prototype (linking to the env prototype above):
+ https://github.com/jayz22/rs-soroban-sdk/tree/bls12-318-field-arithmetic
+- BLS-signature custom account example:
+ https://github.com/jayz22/soroban-examples/tree/bls-signature/bls_signature
diff --git a/core/cap-0060.md b/core/cap-0060.md
index ab8a15308..844adfef2 100644
--- a/core/cap-0060.md
+++ b/core/cap-0060.md
@@ -15,7 +15,8 @@ Protocol version: 22
## Simple Summary
-Transition Soroban from the Wasmi 0.31 stack machine to the post-Wasmi-0.32 register machine (0.36 at the time of writing).
+Transition Soroban from the Wasmi 0.31 stack machine to the post-Wasmi-0.32
+register machine (0.36 at the time of writing).
## Working Group
@@ -29,13 +30,19 @@ Improving the performance of Soroban transactions.
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should run at scale and at low cost to all participants of the network.
+- The Stellar Network should run at scale and at low cost to all participants
+ of the network.
## Abstract
-Wasmi 0.32 shipped a new execution engine. The new engine is both faster at straight-line code and also supports lazy validation and translation of its internal representation, reducing the amount of work performed during instantiation and thereby enabling lower-latency transactions.
+Wasmi 0.32 shipped a new execution engine. The new engine is both faster at
+straight-line code and also supports lazy validation and translation of its
+internal representation, reducing the amount of work performed during
+instantiation and thereby enabling lower-latency transactions.
-In general the new execution engine runs the same Wasm bytecode as the old one, with the same semantics, typically at lower cost and higher speed. However there are some minor differences that can be detected by a careful observer.
+In general the new execution engine runs the same Wasm bytecode as the old one,
+with the same semantics, typically at lower cost and higher speed. However
+there are some minor differences that can be detected by a careful observer.
## Specification
@@ -47,44 +54,77 @@ None.
#### VM IR: translation cost, execution cost and gas model
-Both the old and new versions of Wasmi execute Webassembly code by translating it to an internal representation (IR) that is convenient to execute, and then executing the IR. The old IR corresponded more closely to Webassembly's natural form than the new one. This has two immediate consequences:
+Both the old and new versions of Wasmi execute Webassembly code by translating
+it to an internal representation (IR) that is convenient to execute, and then
+executing the IR. The old IR corresponded more closely to Webassembly's natural
+form than the new one. This has two immediate consequences:
-1. It takes more work to translate any given Webassembly function into the new IR. This will be reflected in higher per-function CPU instruction costs during translation.
+1. It takes more work to translate any given Webassembly function into the new
+ IR. This will be reflected in higher per-function CPU instruction costs
+ during translation.
-2. It takes less work to execute the new IR produced by translating any given Webassembly function. This will be reflected in lower per-function CPU instruction costs during execution.
+2. It takes less work to execute the new IR produced by translating any given
+ Webassembly function. This will be reflected in lower per-function CPU
+ instruction costs during execution.
#### Lazy compilation
-In addition to the cost differences arising from the IR, the new version of Wasmi supports a significant new mode: _lazy compilation_.
+In addition to the cost differences arising from the IR, the new version of
+Wasmi supports a significant new mode: _lazy compilation_.
-When compiling a module _lazily_, Wasmi performs only a minimal "initial parse" of the structure of a module and defers the majority of work involved in validation and translation of each of the functions in the module until each function gets called. If any function is _never_ called in a transaction, only the minimal initial parse gets charged.
+When compiling a module _lazily_, Wasmi performs only a minimal "initial parse"
+of the structure of a module and defers the majority of work involved in
+validation and translation of each of the functions in the module until each
+function gets called. If any function is _never_ called in a transaction, only
+the minimal initial parse gets charged.
-Lazy compilation will be enabled for modules _after_ they are uploaded to the ledger, during subsequent execution.
+Lazy compilation will be enabled for modules _after_ they are uploaded to the
+ledger, during subsequent execution.
-During upload, compilation will be performed eagerly, ensuring that any invalid Webassembly code is caught before being admitted to the ledger at all.
+During upload, compilation will be performed eagerly, ensuring that any invalid
+Webassembly code is caught before being admitted to the ledger at all.
## Design Rationale
### Stack vs. register machines
-The new VM engine is a register machine. This model has been repeatedly shown in many VM designs over many years to provide measurably higher execution throughput by reducing the size of the IR and number of instruction-dispatches that occur when processing the same input code.
+The new VM engine is a register machine. This model has been repeatedly shown
+in many VM designs over many years to provide measurably higher execution
+throughput by reducing the size of the IR and number of instruction-dispatches
+that occur when processing the same input code.
-The tradeoff is that it takes a bit more time to compile, thereby increasing latency.
+The tradeoff is that it takes a bit more time to compile, thereby increasing
+latency.
-If we were considering only the latency and throughput of the same amount of total work, we might not consider this tradeoff worthwhile: it would be good for the longest-running (compute-intensive) transactions but bad for the shortest-running (latency-sensitive) transactions.
+If we were considering only the latency and throughput of the same amount of
+total work, we might not consider this tradeoff worthwhile: it would be good
+for the longest-running (compute-intensive) transactions but bad for the
+shortest-running (latency-sensitive) transactions.
-However, this change is being accompanied by a major improvement in latency as well: lazy compilation.
+However, this change is being accompanied by a major improvement in latency as
+well: lazy compilation.
### Lazy compilation
-Most contracts call only a subset of the code in a module. The exact amount will vary by contract, but for example if a transaction only calls half of the functions available in a contract, then only the minimal initial parse gets charged for the un-called (cold) functions. The initial parse of a function is as much as 100x cheaper than a full validation and translation of the function. So our expectation is that for the majority of functions, there will also be a significant improvement in latency.
+Most contracts call only a subset of the code in a module. The exact amount
+will vary by contract, but for example if a transaction only calls half of the
+functions available in a contract, then only the minimal initial parse gets
+charged for the un-called (cold) functions. The initial parse of a function is
+as much as 100x cheaper than a full validation and translation of the function.
+So our expectation is that for the majority of functions, there will also be a
+significant improvement in latency.
## Test Cases
-Testing so far has been included extensive upstream validation tests of the VM as well as the existing Soroban testsuite. We will additionally be replaying segments of the Stellar network's history of Soroban transactions to confirm identical behaviour (besides cost differences).
+Testing so far has been included extensive upstream validation tests of the VM
+as well as the existing Soroban testsuite. We will additionally be replaying
+segments of the Stellar network's history of Soroban transactions to confirm
+identical behaviour (besides cost differences).
## Implementation
-A pull request is available at https://github.com/stellar/rs-soroban-env/pull/1442
+A pull request is available at
+https://github.com/stellar/rs-soroban-env/pull/1442
-At the time of writing there remains some integration work but the code works well enough to pass most tests.
+At the time of writing there remains some integration work but the code works
+well enough to pass most tests.
diff --git a/core/cap-0061.md b/core/cap-0061.md
index 00ff1e904..97f0fc339 100644
--- a/core/cap-0061.md
+++ b/core/cap-0061.md
@@ -16,23 +16,27 @@ Implement the [SEP-44] for the Stellar Asset Contract.
## Motivation
-To support an unambiguous method by which contract wallets can transfer Stellar Assets to custodial wallets that require
-a memo to be associated with the transfer that is populated through to events used to track transfers.
+To support an unambiguous method by which contract wallets can transfer Stellar
+Assets to custodial wallets that require a memo to be associated with the
+transfer that is populated through to events used to track transfers.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
-- The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
-- The Stellar Network should enable cross-border payments, i.e. payments via exchange of assets, throughout the globe,
- enabling users to make payments between assets in a manner that is fast, cheap, and highly usable.
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
+- The Stellar Network should enable cross-border payments, i.e. payments via
+ exchange of assets, throughout the globe, enabling users to make payments
+ between assets in a manner that is fast, cheap, and highly usable.
## Abstract
-This proposal introduces a built-in implementation for Stellar Assets of the [SEP-44] standard for transferring with
-an associated memo.
+This proposal introduces a built-in implementation for Stellar Assets of the
+[SEP-44] standard for transferring with an associated memo.
-This proposal disallows the subsequent use of the transaction memo field on smart contract transactions.
+This proposal disallows the subsequent use of the transaction memo field on
+smart contract transactions.
## Specification
@@ -42,7 +46,8 @@ None.
### Transactions Changes
-The `memo` field of `Transaction` must have value `MEMO_NONE` when `SorobanTransactionData` is present.
+The `memo` field of `Transaction` must have value `MEMO_NONE` when
+`SorobanTransactionData` is present.
### Extended Token Interface
@@ -72,24 +77,28 @@ fn transfer_memo(env: Env, from: Address, to: Address, amount: i128, memo: u64);
See [SEP-44] for the design rationale of the `transfer_memo` interface.
-The `memo` field of transactions is disallowed on smart contract transactions to remove any ambiguity that arises by
-specifying different memos on the transaction vs within the smart contract invocation. An assessment of transaction memo
-usage today with smart contract transactions shows that their use are rare.
+The `memo` field of transactions is disallowed on smart contract transactions
+to remove any ambiguity that arises by specifying different memos on the
+transaction vs within the smart contract invocation. An assessment of
+transaction memo usage today with smart contract transactions shows that their
+use are rare.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-This proposal introduces a breaking change into the Transaction structure where the memo cannot be set for transactions
-involving smart contracts.
+This proposal introduces a breaking change into the Transaction structure where
+the memo cannot be set for transactions involving smart contracts.
-This proposal is otherwise backwards compatible in regards to all existing functionality, however the introduction of
-the `transfer` event with the additional memo topic requires that existing applications be updated to handle the memo
-topic being present.
+This proposal is otherwise backwards compatible in regards to all existing
+functionality, however the introduction of the `transfer` event with the
+additional memo topic requires that existing applications be updated to handle
+the memo topic being present.
### Resource Utilization
-This proposal will lead to approximately the same resource usage as exists today.
+This proposal will lead to approximately the same resource usage as exists
+today.
## Security Concerns
diff --git a/core/cap-0062.md b/core/cap-0062.md
index 4af6c882d..71e8f6075 100644
--- a/core/cap-0062.md
+++ b/core/cap-0062.md
@@ -13,8 +13,9 @@ Protocol version: 23
## Simple Summary
-This proposal allows the network to evict `PERSISTENT` entries from the live state BucketList to a separate BucketList
-containing archival state. Note that evicted entries are not deleted from validators.
+This proposal allows the network to evict `PERSISTENT` entries from the live
+state BucketList to a separate BucketList containing archival state. Note that
+evicted entries are not deleted from validators.
## Working Group
@@ -22,28 +23,38 @@ As specified in the Preamble.
## Motivation
-This is a first step towards full State Archival, which will lower the storage requirements of validators
-and decrease the growth of History Archives. Additionally, this opens the door for database optimizations of
-live Soroban state, increasing limits and throughput, as well as automatic entry restoration.
+This is a first step towards full State Archival, which will lower the storage
+requirements of validators and decrease the growth of History Archives.
+Additionally, this opens the door for database optimizations of live Soroban
+state, increasing limits and throughput, as well as automatic entry
+restoration.
### Goals Alignment
-This change is aligned with the goal of lowering the cost and increasing the scale of the network.
+This change is aligned with the goal of lowering the cost and increasing the
+scale of the network.
## Abstract
-Currently both live and archived Soroban state are maintained in a single BucketList DB. This makes data prioritization difficult
-since all state exists in a singular data structure. This proposal separates archived state and live state into two separate DBs.
-While these DBs are still both persisted on disk by validators, optimizing live state access is significantly easier. In particular,
-live state can be entirely cached in memory, removing disk reads from the Soroban transaction execution path, greatly increasing
-read limits and throughput.
-
-Each validator will maintain two BucketLists, the Live BucketList and the Hot Archive BucketList. `PERSISTENT` Soroban
-data entries and code entries that have expired will be "evicted" from the Live BucketList and moved to the Hot Archive
-BucketList. Both the Live BucketList and Hot Archive BucketList are persisted on disk and written to History Archives.
-
-Note that this proposal is a subset of [CAP-0057](cap-0057.md). The Hot Archive here is the same as CAP-0057, and this CAP
-is a strict subset of CAP-0057. The purpose is not to replace CAP-0057, but to offer a smaller first step implementation wise.
+Currently both live and archived Soroban state are maintained in a single
+BucketList DB. This makes data prioritization difficult since all state exists
+in a singular data structure. This proposal separates archived state and live
+state into two separate DBs. While these DBs are still both persisted on disk
+by validators, optimizing live state access is significantly easier. In
+particular, live state can be entirely cached in memory, removing disk reads
+from the Soroban transaction execution path, greatly increasing read limits and
+throughput.
+
+Each validator will maintain two BucketLists, the Live BucketList and the Hot
+Archive BucketList. `PERSISTENT` Soroban data entries and code entries that
+have expired will be "evicted" from the Live BucketList and moved to the Hot
+Archive BucketList. Both the Live BucketList and Hot Archive BucketList are
+persisted on disk and written to History Archives.
+
+Note that this proposal is a subset of [CAP-0057](cap-0057.md). The Hot Archive
+here is the same as CAP-0057, and this CAP is a strict subset of CAP-0057. The
+purpose is not to replace CAP-0057, but to offer a smaller first step
+implementation wise.
## Specification
@@ -112,43 +123,56 @@ case LEDGER_ENTRY_RESTORE:
### Semantics
-With this change, the current BucketList is divided into two separate BucketLists as follows.
+With this change, the current BucketList is divided into two separate
+BucketLists as follows.
#### Live State BucketList
-The Live State BucketList most closely resembles the current BucketList. It will contain all “live” state of the ledger, including
-Stellar classic entries, live Soroban entries, network config settings, etc.
-The Live State BucketList is published to the History Archive on every checkpoint ledger via the "history" category. The Live BucketList
-functions identically to the current BucketList.
+The Live State BucketList most closely resembles the current BucketList. It
+will contain all “live” state of the ledger, including Stellar classic entries,
+live Soroban entries, network config settings, etc. The Live State BucketList
+is published to the History Archive on every checkpoint ledger via the
+"history" category. The Live BucketList functions identically to the current
+BucketList.
#### Hot Archive BucketList
-The Hot Archive is a BucketList that stores evicted `PERSISTENT` entries.
-It contains `HotArchiveBucketEntry` type entries and is constructed as follows:
+The Hot Archive is a BucketList that stores evicted `PERSISTENT` entries. It
+contains `HotArchiveBucketEntry` type entries and is constructed as follows:
-1. Whenever a `PERSISTENT` entry is evicted, the entry is deleted from the Live State BucketList and added to the Hot Archive as a `HOT_ARCHIVE_ARCHIVED`
-entry. The corresponding `TTLEntry` is deleted and not stored in the Hot Archive.
-2. If an archived entry is restored and the entry currently exists in the Hot Archive, the `HOT_ARCHIVE_ARCHIVED` node previously stored in the Hot
-Archive is overwritten by a `HOT_ARCHIVE_LIVE` entry. Similar to `DEADENTRY` in the Live BucketList, `HOT_ARCHIVE_LIVE` are dropped when the bottom
-most Bucket merges.
+1. Whenever a `PERSISTENT` entry is evicted, the entry is deleted from the Live
+ State BucketList and added to the Hot Archive as a `HOT_ARCHIVE_ARCHIVED`
+ entry. The corresponding `TTLEntry` is deleted and not stored in the Hot
+ Archive.
+2. If an archived entry is restored and the entry currently exists in the Hot
+ Archive, the `HOT_ARCHIVE_ARCHIVED` node previously stored in the Hot
+ Archive is overwritten by a `HOT_ARCHIVE_LIVE` entry. Similar to `DEADENTRY`
+ in the Live BucketList, `HOT_ARCHIVE_LIVE` are dropped when the bottom most
+ Bucket merges.
-For Bucket merges, the newest version of a given key is always taken. At the bottom level, `HOT_ARCHIVE_LIVE` entries are dropped.
-The `HOT_ARCHIVE_LIVE` state indicates that the given key currently exists in the Live BucketList. Thus, any Hot Archive reference
-is out of date and can be dropped.
+For Bucket merges, the newest version of a given key is always taken. At the
+bottom level, `HOT_ARCHIVE_LIVE` entries are dropped. The `HOT_ARCHIVE_LIVE`
+state indicates that the given key currently exists in the Live BucketList.
+Thus, any Hot Archive reference is out of date and can be dropped.
-The current Hot Archive is published to the History Archive via the "history" category on every checkpoint ledger.
+The current Hot Archive is published to the History Archive via the "history"
+category on every checkpoint ledger.
##### Hot Archive BucketList Depth and Spill Schedule
-The Hot Archive is guaranteed to experience less churn than the Live BucketList, given that the only events
-than can modify the Hot Archive BucketList are eviction and restoration. Thus, the spill schedule and depth
-of the Hot Archive BucketList should not match that of the Live BucketList. Long term, it is expected that
-the Hot Archive BucketList become significantly bigger than the Live BucketList (assuming State Archival
-for classic entry types and increased Soroban adoption).
+The Hot Archive is guaranteed to experience less churn than the Live
+BucketList, given that the only events than can modify the Hot Archive
+BucketList are eviction and restoration. Thus, the spill schedule and depth of
+the Hot Archive BucketList should not match that of the Live BucketList. Long
+term, it is expected that the Hot Archive BucketList become significantly
+bigger than the Live BucketList (assuming State Archival for classic entry
+types and increased Soroban adoption).
-Specific configurations will need to be investigated, but a slower spill schedule is most likely optimal. A
-deeper BucketList (to accommodate the expected size difference between Live and Hot Archive) may not be
-necessary, as the spill schedule impacts the maximum "capacity" of a BucketList along with the depth.
+Specific configurations will need to be investigated, but a slower spill
+schedule is most likely optimal. A deeper BucketList (to accommodate the
+expected size difference between Live and Hot Archive) may not be necessary, as
+the spill schedule impacts the maximum "capacity" of a BucketList along with
+the depth.
#### Changes to History Archives
@@ -163,11 +187,13 @@ currentBuckets: an array containing an encoding of the live state bucket list fo
hotArchiveBuckets: an array containing an encoding of the hot archive bucket list for this ledger
```
-Bucket files for the `hotArchiveBuckets` will be stored just as Bucket files are currently stored.
+Bucket files for the `hotArchiveBuckets` will be stored just as Bucket files
+are currently stored.
#### Changes to LedgerHeader
-While the XDR structure of `LedgerHeader` remains unchanged, `bucketListHash` will be changed to the following:
+While the XDR structure of `LedgerHeader` remains unchanged, `bucketListHash`
+will be changed to the following:
```
header.bucketListHash = SHA256(liveStateBucketListHash, hotArchiveHash)
@@ -175,27 +201,30 @@ header.bucketListHash = SHA256(liveStateBucketListHash, hotArchiveHash)
#### Meta
-Whenever a `PERSISTENT` entry is evicted (i.e. removed from the Live State BucketList and added to the Hot Archive),
-the entry key and its associated TTL key will be emitted via `evictedLedgerKeys`
-(this field has been renamed and was previously named `evictedTemporaryLedgerKeys`).
-
-Whenever an entry is restored via `RestoreFootprintOp`, the `LedgerEntry` being restored and its
-associated TTL will be emitted as a `LedgerEntryChange` of type `LEDGER_ENTRY_RESTORE`. Note that
-entries can be restored from both the Live State and Hot Archive BucketList, because archived entries
-continue to exist in Live State until they are evicted as part of natural BucketList growth. The emitted
-meta does not distinguish between these.
+Whenever a `PERSISTENT` entry is evicted (i.e. removed from the Live State
+BucketList and added to the Hot Archive), the entry key and its associated TTL
+key will be emitted via `evictedLedgerKeys` (this field has been renamed and
+was previously named `evictedTemporaryLedgerKeys`).
+Whenever an entry is restored via `RestoreFootprintOp`, the `LedgerEntry` being
+restored and its associated TTL will be emitted as a `LedgerEntryChange` of
+type `LEDGER_ENTRY_RESTORE`. Note that entries can be restored from both the
+Live State and Hot Archive BucketList, because archived entries continue to
+exist in Live State until they are evicted as part of natural BucketList
+growth. The emitted meta does not distinguish between these.
#### Default Values for Network Configs
-`sorobanLiveStateTargetSizeBytes` (formerly `bucketListTargetSizeBytes`) will need to be changed
-to reflect the new size calculations. Additionally, the "fee curve" may need to be revisted
-now that classic state growth no longer affects Soroban fees.
+`sorobanLiveStateTargetSizeBytes` (formerly `bucketListTargetSizeBytes`) will
+need to be changed to reflect the new size calculations. Additionally, the "fee
+curve" may need to be revisted now that classic state growth no longer affects
+Soroban fees.
-`contractMaxSizeBytes` will need to be increased to account for the new size calculations.
+`contractMaxSizeBytes` will need to be increased to account for the new size
+calculations.
-These changes will be included in the protocol 23 upgrade and will not be subject to a separate
-Soroban settings upgrade.
+These changes will be included in the protocol 23 upgrade and will not be
+subject to a separate Soroban settings upgrade.
TODO: Initial values for network configs.
@@ -203,98 +232,129 @@ TODO: Initial values for network configs.
### CAP-0062 vs. CAP-0057
-This CAP is a strict subset of [CAP-0057](cap-0057.md). The intention is not to replace CAP-0057, but to implement this CAP
-first as a step towards CAP-0057. At current usage levels, "full state archival" as specified by CAP-0057 (i.e. evicted entries are deleted
-from validators) is not a necessary or useful optimization. The space savings of validators and the History Archive would be
-minimal, and the additional complexity, both from a computational and UX standpoint, do not justify the minimal storage savings.
-Long term, should usage increase, the tradeoffs introduced by CAP-0057 will make more sense, but that is not the case currently
-or in the near term future.
-
-The reasoning behind introducing CAP-0057 before it was strictly necessary was to prevent breakage of applications. If applications
-and users were not prepared for State Archival, it would be very difficult to enable CAP-0057. However, because
-[CAP-0046-12](./cap-0046-12.md) is implemented and State Archival semantics are already required, upgrading to CAP-0057 should not
-be breaking, especially since much of the complexity, like proof generation, is handled automatically by RPC endpoints
-(via RPC's captive-core backend).
-
-While there are no immediate benefits to the "full" state archival introduced in CAP-0057, there are significant short term and long
-term gains from "partial state archival" introduced in this CAP. Specifically, this CAP will allow for full in-memory Soroban live state
-caching and automatic restoration of Soroban entries via `InvokeHostFunctionOp`, to be detailed in a future CAP.
+This CAP is a strict subset of [CAP-0057](cap-0057.md). The intention is not to
+replace CAP-0057, but to implement this CAP first as a step towards CAP-0057.
+At current usage levels, "full state archival" as specified by CAP-0057 (i.e.
+evicted entries are deleted from validators) is not a necessary or useful
+optimization. The space savings of validators and the History Archive would be
+minimal, and the additional complexity, both from a computational and UX
+standpoint, do not justify the minimal storage savings. Long term, should usage
+increase, the tradeoffs introduced by CAP-0057 will make more sense, but that
+is not the case currently or in the near term future.
+
+The reasoning behind introducing CAP-0057 before it was strictly necessary was
+to prevent breakage of applications. If applications and users were not
+prepared for State Archival, it would be very difficult to enable CAP-0057.
+However, because [CAP-0046-12](./cap-0046-12.md) is implemented and State
+Archival semantics are already required, upgrading to CAP-0057 should not be
+breaking, especially since much of the complexity, like proof generation, is
+handled automatically by RPC endpoints (via RPC's captive-core backend).
+
+While there are no immediate benefits to the "full" state archival introduced
+in CAP-0057, there are significant short term and long term gains from "partial
+state archival" introduced in this CAP. Specifically, this CAP will allow for
+full in-memory Soroban live state caching and automatic restoration of Soroban
+entries via `InvokeHostFunctionOp`, to be detailed in a future CAP.
### Fees and Limits Changes
#### Not counting Archive State
-Because ledger state is now split into two BucketLists, the BucketList size component for storage fees must be
-reconsidered. Currently, the total size of the BucketList is used for Soroban related fees. This is frustrating
-to end users, as classic state dominates the BucketList size such that changes unrelated to Soroban have the most
-impact towards Soroban related fees. The purpose of the fee is to give back pressure to new writes when the BucketList
-is large. This should discourage writes (or financially prevent DOS attacks) and give time for State Archival to evict
-state, reduce the size of the BucketList, thus reducing fees. The issue is, classic entries dominate storage and are
-not subject to State Archival, so this back pressure system is not effective. In practice, should BucketList size
-rapidly increase, it is most likely due to classic state, and the only way to lower fees is via network config upgrade.
-
-This CAP changes the BucketList size used in fee calculations to Live Soroban State Size. This means that
-archived state (i.e. the Hot Archive size) does not impact fees. Long term, it is expected that CAP-0057 will
-be implemented and the Hot Archive will be dropped from validators. Thus from a network health standpoint, the size of
-the Hot Archive does not have a significant impact on network sustainability.
-
-Prior to CAP-0057, large Hot Archives do impact network sustainability, specifically with respect to History
-Archive size, catchup time, and node disk requirements. However, a fee based on Hot Archive size does not
-seem to address this issue. First, there is currently no way to remove state from the Hot Archive. Only
-restore operations remove Hot Archive state, but this state is just added back to the Live BucketList. Thus
-fees would continually increase without any benefit to network sustainability, as no action or system is
+Because ledger state is now split into two BucketLists, the BucketList size
+component for storage fees must be reconsidered. Currently, the total size of
+the BucketList is used for Soroban related fees. This is frustrating to end
+users, as classic state dominates the BucketList size such that changes
+unrelated to Soroban have the most impact towards Soroban related fees. The
+purpose of the fee is to give back pressure to new writes when the BucketList
+is large. This should discourage writes (or financially prevent DOS attacks)
+and give time for State Archival to evict state, reduce the size of the
+BucketList, thus reducing fees. The issue is, classic entries dominate storage
+and are not subject to State Archival, so this back pressure system is not
+effective. In practice, should BucketList size rapidly increase, it is most
+likely due to classic state, and the only way to lower fees is via network
+config upgrade.
+
+This CAP changes the BucketList size used in fee calculations to Live Soroban
+State Size. This means that archived state (i.e. the Hot Archive size) does not
+impact fees. Long term, it is expected that CAP-0057 will be implemented and
+the Hot Archive will be dropped from validators. Thus from a network health
+standpoint, the size of the Hot Archive does not have a significant impact on
+network sustainability.
+
+Prior to CAP-0057, large Hot Archives do impact network sustainability,
+specifically with respect to History Archive size, catchup time, and node disk
+requirements. However, a fee based on Hot Archive size does not seem to address
+this issue. First, there is currently no way to remove state from the Hot
+Archive. Only restore operations remove Hot Archive state, but this state is
+just added back to the Live BucketList. Thus fees would continually increase
+without any benefit to network sustainability, as no action or system is
currently in place to reduce Hot Archive size.
-While not improving network sustainability, Hot Archive fees could prevent state based DOS attacks, where
-state is rapidly added to the network. However, unlike the live BucketList, operations cannot directly
-write state to the Hot Archive (with the exception of restores, which always decrease Hot Archive size).
-The rate of eviction (i.e. the rate at which state is added to the Hot Archive) can already be controlled
-via network config settings. The rate at which state is added to the Live BucketList is also already
-controlled by network config settings. Thus, enough tools are already in place to prevent state based DOS
-attacks without the addition of a Hot Archive based fee.
+While not improving network sustainability, Hot Archive fees could prevent
+state based DOS attacks, where state is rapidly added to the network. However,
+unlike the live BucketList, operations cannot directly write state to the Hot
+Archive (with the exception of restores, which always decrease Hot Archive
+size). The rate of eviction (i.e. the rate at which state is added to the Hot
+Archive) can already be controlled via network config settings. The rate at
+which state is added to the Live BucketList is also already controlled by
+network config settings. Thus, enough tools are already in place to prevent
+state based DOS attacks without the addition of a Hot Archive based fee.
#### Changes required for `CONTRACT_WASM`
-[CAP-0065](cap-0065.md) and [CAP-0066](cap-0066.md) build on this work to cache all live Soroban state in memory,
-including `CONTRACT_DATA`, `TTL`, and instantiated contract code. These caches open potential DOS vectors with the
+[CAP-0065](cap-0065.md) and [CAP-0066](cap-0066.md) build on this work to cache
+all live Soroban state in memory, including `CONTRACT_DATA`, `TTL`, and
+instantiated contract code. These caches open potential DOS vectors with the
way `CONTRACT_CODE` size is metered in protocol 22.
-For `CONTRACT_DATA` and `TTL`, we cache all live `LedgerEntry`. This does not present a DOS angle. We meter these
-entry types based on `LedgerEntry` size such that we have a 1 to 1 ratio between the bytes that are metered and
-the bytes that are cached in memory.
-
-For `CONTRACT_CODE`, this is no longer the case. [CAP-0065](cap-0065.md) caches instantiated modules in memory,
-which in the worst case could be up to 40x the size of the `CONTRACT_CODE` LedgerEntry size. This is a significant
-OOM risk, as `sorobanLiveStateTargetSizeBytes` must bound the amount of state validators are required to cache in
-memory. If `CONTRACT_CODE` xdr size is used, an attacker could upload code that once instantiated could bloat the
-cache size to `sorobanLiveStateTargetSizeBytes * 40`, causing an OOM based DOS of the network. To prevent this,
-we use the instantiated module memory cost model size instead of the `CONTRACT_CODE` LedgerEntry size.
-
-The only down side is that uploading, restoring, and rent bumping `CONTRACT_CODE` will be more expensive from both a
-fee and limits standpoint. The fee increase is not a significant concern. However, when uploading/restoring a
-new `CONTRACT_CODE` entry with the new size calculation, writeBytes will be significantly higher than what is actually
-being written to disk, as we charge for the (much larger) in-memory size instead of the actual on-disk size. This means
-that we might need to increase `txMaxWriteBytes` to support uploading larger/more complex contracts despite these write
-sources not actually being utilized.
-
-In the short term, this does not seem to be an issue, as we can set max contract sizes modestly and have significant
-buffer wrt write bytes limits. However, this may become an issue in the future. A future protocol may explore splitting
-limits and fees on write bytes for `CONTRACT_CODE`. We could charge writeBytes fees based on instantiated size to prevent DOS,
-but use the actual on-disk size for `txMaxWriteBytes` and `ledgerMaxWriteBytes` limits to avoid increasing limits artificially
-due to under utilization. However, this does not appear necessary for protocol 23.
+For `CONTRACT_DATA` and `TTL`, we cache all live `LedgerEntry`. This does not
+present a DOS angle. We meter these entry types based on `LedgerEntry` size
+such that we have a 1 to 1 ratio between the bytes that are metered and the
+bytes that are cached in memory.
+
+For `CONTRACT_CODE`, this is no longer the case. [CAP-0065](cap-0065.md) caches
+instantiated modules in memory, which in the worst case could be up to 40x the
+size of the `CONTRACT_CODE` LedgerEntry size. This is a significant OOM risk,
+as `sorobanLiveStateTargetSizeBytes` must bound the amount of state validators
+are required to cache in memory. If `CONTRACT_CODE` xdr size is used, an
+attacker could upload code that once instantiated could bloat the cache size to
+`sorobanLiveStateTargetSizeBytes * 40`, causing an OOM based DOS of the
+network. To prevent this, we use the instantiated module memory cost model size
+instead of the `CONTRACT_CODE` LedgerEntry size.
+
+The only downside is that uploading, restoring, and rent bumping
+`CONTRACT_CODE` will be more expensive from both a fee and limits standpoint.
+The fee increase is not a significant concern. However, when
+uploading/restoring a new `CONTRACT_CODE` entry with the new size calculation,
+writeBytes will be significantly higher than what is actually being written to
+disk, as we charge for the (much larger) in-memory size instead of the actual
+on-disk size. This means that we might need to increase `txMaxWriteBytes` to
+support uploading larger/more complex contracts despite these write sources not
+actually being utilized.
+
+In the short term, this does not seem to be an issue, as we can set max
+contract sizes modestly and have significant buffer wrt write bytes limits.
+However, this may become an issue in the future. A future protocol may explore
+splitting limits and fees on write bytes for `CONTRACT_CODE`. We could charge
+writeBytes fees based on instantiated size to prevent DOS, but use the actual
+on-disk size for `txMaxWriteBytes` and `ledgerMaxWriteBytes` limits to avoid
+increasing limits artificially due to under utilization. However, this does not
+appear necessary for protocol 23.
### Expectations for Downstream Systems
-Meta changed outlined in [CAP-0066](cap-0066.md). Otherwise, there will be minimal changes with some XDR fields
-being renamed.
+Meta changes outlined in [CAP-0066](cap-0066.md). Otherwise, there will be
+minimal changes with some XDR fields being renamed.
## Security Concerns
-This introduces no novel security concerns. All state is still maintained by validators and written to History
-Archives. Splitting state into separate DBs has no inherit risks.
+This introduces no novel security concerns. All state is still maintained by
+validators and written to History Archives. Splitting state into separate DBs
+has no inherent risks.
-[CAP-0065](cap-0065.md) and [CAP-0066](cap-0066.md) introduce potential DOS vectors wrt `sorobanLiveStateTargetSizeBytes`
-addressed above in the fees section.
+[CAP-0065](cap-0065.md) and [CAP-0066](cap-0066.md) introduce potential DOS
+vectors wrt `sorobanLiveStateTargetSizeBytes` addressed above in the fees
+section.
## Test Cases
diff --git a/core/cap-0063.md b/core/cap-0063.md
index 5769d62dd..56cec32a1 100644
--- a/core/cap-0063.md
+++ b/core/cap-0063.md
@@ -15,7 +15,9 @@ Protocol version: 23
## Simple Summary
-This CAP defines a new structure for transaction sets that allows for applying Smart Contract transactions with multiple threads while maintaining bounded application time.
+This CAP defines a new structure for transaction sets that allows for applying
+Smart Contract transactions with multiple threads while maintaining bounded
+application time.
## Working Group
@@ -23,37 +25,75 @@ As specified in the Preamble.
## Motivation
-Every node in the Stellar network has to apply the transactions to the current ledger state in order to produce the next block. Currently, transaction application happens in a single-threaded fashion, which results in CPU under-utilization on most modern systems. Utilizing the idle cores for applying the transactions provides a straightforward way of increasing the number of transactions that may be included into a single ledger block without compromising the time for closing that ledger.
-
-Smart Contract transactions on Stellar were designed with parallelism support in mind (see [CAP-46](cap-0046-05.md#static-footprints)). Specifically, every transaction has to declare the entries it may read and modify. This in theory allows Core nodes to pick a set of Smart Contract transactions and come up with a parallel execution schedule that will produce exactly the same result 'as if' the transaction set has been applied sequentially. However, there is currently nothing in the protocol that prevents validators from nominating 'parallel-unfriendly' transaction sets that can't be parallelized at all (e.g. when all transactions want to modify the same key). Thus, the time necessary for applying any given transaction set may vary wildly between `1*x` and `T*x` where T is the number of threads a validator may use.
-
-This CAP aims to solve this issue and allow for parallelization that has a guaranteed upper bound for the execution time of transaction sets (given at least the minimum specified number of physical threads supported).
+Every node in the Stellar network has to apply the transactions to the current
+ledger state in order to produce the next block. Currently, transaction
+application happens in a single-threaded fashion, which results in CPU
+under-utilization on most modern systems. Utilizing the idle cores for applying
+the transactions provides a straightforward way of increasing the number of
+transactions that may be included into a single ledger block without
+compromising the time for closing that ledger.
+
+Smart Contract transactions on Stellar were designed with parallelism support
+in mind (see [CAP-46](cap-0046-05.md#static-footprints)). Specifically, every
+transaction has to declare the entries it may read and modify. This in theory
+allows Core nodes to pick a set of Smart Contract transactions and come up with
+a parallel execution schedule that will produce exactly the same result 'as if'
+the transaction set has been applied sequentially. However, there is currently
+nothing in the protocol that prevents validators from nominating
+'parallel-unfriendly' transaction sets that can't be parallelized at all (e.g.
+when all transactions want to modify the same key). Thus, the time necessary
+for applying any given transaction set may vary wildly between `1*x` and `T*x`
+where T is the number of threads a validator may use.
+
+This CAP aims to solve this issue and allow for parallelization that has a
+guaranteed upper bound for the execution time of transaction sets (given at
+least the minimum specified number of physical threads supported).
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should run at scale and at low cost to all participants of the network.
+- The Stellar Network should run at scale and at low cost to all participants
+ of the network.
## Abstract
-This CAP introduces a new format for the Smart Contract (Soroban) phase of the transaction set. The new format organizes the transactions in the following way:
-
-- Transactions are split into a sequence of 1 or more 'stages', that have to be applied sequentially.
-- Every stage consists of multiple clusters of transactions, such that every cluster is completely independent of every other cluster within the stage, so every cluster may be executed in parallel.
-- Every cluster consists of potentially dependent transactions, so these transactions have to be generally applied sequentially with respect to each other.
-
-The CAP also adds a new network setting for controlling the maximum number of clusters per stage, which effectively regulates the maximum degree of parallelism supported by the network.
-
-The transaction sets defined by this CAP must not exceed the ledger limits specified by the network configuration, including the new limit on the maximum number of parallel clusters. The validation process allows validators to execute the smart contract transactions using multiple threads while maintaining the upper bound on the total modelled instructions executed by every thread and thus capping the overall execution time.
-
-In order to maintain the read-only TTL update semantics, the CAP also defines the new algorithm for updating the TTLs of the ledger entries, specifically when multiple transactions within the same ledger update the TTL of the same entry.
+This CAP introduces a new format for the Smart Contract (Soroban) phase of the
+transaction set. The new format organizes the transactions in the following
+way:
+
+- Transactions are split into a sequence of 1 or more 'stages', that have to be
+ applied sequentially.
+- Every stage consists of multiple clusters of transactions, such that every
+ cluster is completely independent of every other cluster within the stage, so
+ every cluster may be executed in parallel.
+- Every cluster consists of potentially dependent transactions, so these
+ transactions have to be generally applied sequentially with respect to each
+ other.
+
+The CAP also adds a new network setting for controlling the maximum number of
+clusters per stage, which effectively regulates the maximum degree of
+parallelism supported by the network.
+
+The transaction sets defined by this CAP must not exceed the ledger limits
+specified by the network configuration, including the new limit on the maximum
+number of parallel clusters. The validation process allows validators to
+execute the smart contract transactions using multiple threads while
+maintaining the upper bound on the total modelled instructions executed by
+every thread and thus capping the overall execution time.
+
+In order to maintain the read-only TTL update semantics, the CAP also defines
+the new algorithm for updating the TTLs of the ledger entries, specifically
+when multiple transactions within the same ledger update the TTL of the same
+entry.
## Specification
### XDR Changes
-This patch of XDR changes is based on the XDR files in commit `a41b2db15ea34a9f9da5326b996bb8a7ceb5740f` of stellar-xdr.
+This patch of XDR changes is based on the XDR files in commit
+`a41b2db15ea34a9f9da5326b996bb8a7ceb5740f` of stellar-xdr.
+
```diff mddiffcheck.ignore=true
diff --git a/Stellar-contract-config-setting.x b/Stellar-contract-config-setting.x
index 9f09c7b..a8d3325 100644
@@ -62,7 +102,7 @@ index 9f09c7b..a8d3325 100644
@@ -23,6 +23,16 @@ struct ConfigSettingContractComputeV0
uint32 txMemoryLimit;
};
-
+
+// Settings for running the contract transactions in parallel.
+struct ConfigSettingContractParallelComputeV0
+{
@@ -84,7 +124,7 @@ index 9f09c7b..a8d3325 100644
+ CONFIG_SETTING_EVICTION_ITERATOR = 13,
+ CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0 = 14
};
-
+
union ConfigSettingEntry switch (ConfigSettingID configSettingID)
@@ -335,5 +346,7 @@ case CONFIG_SETTING_BUCKETLIST_SIZE_WINDOW:
uint64 bucketListSizeWindow<>;
@@ -101,7 +141,7 @@ index 0fc03e2..ff19f17 100644
@@ -164,6 +164,33 @@ enum TxSetComponentType
TXSET_COMP_TXS_MAYBE_DISCOUNTED_FEE = 0
};
-
+
+// A collection of transactions that *may* have arbitrary read-write data
+// dependencies between each other, i.e. in a general case the transaction
+// execution order within a cluster may not be arbitrarily shuffled without
@@ -139,7 +179,7 @@ index 0fc03e2..ff19f17 100644
+case 1:
+ ParallelTxsComponent parallelTxsComponent;
};
-
+
// Transaction sets are the unit used by SCP to decide on transitions
@@ -458,11 +487,25 @@ case 3:
// This struct groups together changes on a per transaction basis
@@ -166,7 +206,7 @@ index 0fc03e2..ff19f17 100644
+
+ LedgerEntryChanges postTxApplyFeeProcessing;
};
-
+
// this represents a single upgrade that was performed as part of a ledger
@@ -482,7 +525,7 @@ struct LedgerCloseMetaV0
// NB: transactions are sorted in apply order here
@@ -174,7 +214,7 @@ index 0fc03e2..ff19f17 100644
// followed by applying transactions
- TransactionResultMeta txProcessing<>;
+ TransactionResultMetaV0 txProcessing<>;
-
+
// upgrades are applied last
UpgradeEntryMeta upgradesProcessing<>;
@@ -516,7 +559,40 @@ struct LedgerCloseMetaV1
@@ -216,7 +256,7 @@ index 0fc03e2..ff19f17 100644
+ // fees for all transactions are processed first
+ // followed by applying transactions
+ TransactionResultMetaV1 txProcessing<>;
-
+
// upgrades are applied last
UpgradeEntryMeta upgradesProcessing<>;
@@ -542,5 +618,7 @@ case 0:
@@ -233,55 +273,108 @@ index 0fc03e2..ff19f17 100644
#### `ledgerMaxDependentTxClusters` network configuration setting
-A new network configuration setting is introduced in order to regulate the maximum potential degree of parallelism supported by the network.
+A new network configuration setting is introduced in order to regulate the
+maximum potential degree of parallelism supported by the network.
-The setting is `ledgerMaxDependentTxClusters` and it's introduced in a new `ConfigSettingContractParallelComputeV0` setting struct with `CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0` `ConfigSettingID`. During the protocol upgrade to version 23, the respective configuration setting ledger entry will be created and `ledgerMaxDependentTxClusters` will be set to 1 (no parallelism).
+The setting is `ledgerMaxDependentTxClusters` and it's introduced in a new
+`ConfigSettingContractParallelComputeV0` setting struct with
+`CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0` `ConfigSettingID`. During the
+protocol upgrade to version 23, the respective configuration setting ledger
+entry will be created and `ledgerMaxDependentTxClusters` will be set to 1 (no
+parallelism).
-The setting is used for the validation of transaction sets and its exact semantics is described in the following sections.
+The setting is used for the validation of transaction sets and its exact
+semantics is described in the following sections.
#### Soroban phase structure
-Soroban phase (phase 1 of transaction) has to contain a single component with version 1 (`ParallelTxsComponent`). The phase still has to contain only Soroban transactions and only a single Soroban phase is allowed, there is no protocol change around that.
-
-`parallelTxsComponent` contains an optional `baseFee` that represents the discounted base fee that must be used as a base fee for every transaction. If set, `baseFee` has to be not greater than a base fee of any transaction in the component.
+Soroban phase (phase 1 of transaction) has to contain a single component with
+version 1 (`ParallelTxsComponent`). The phase still has to contain only Soroban
+transactions and only a single Soroban phase is allowed, there is no protocol
+change around that.
-`parallelTxsComponent` must consist of zero or more `executionStages`. Every stage in `executionStages` must contain at least one non-empty `DependentTxCluster`. Thus, if a transaction set contains no Soroban transactions, then it must contain a `parallelTxsComponent` with 0 `executionStages`.
+`parallelTxsComponent` contains an optional `baseFee` that represents the
+discounted base fee that must be used as a base fee for every transaction. If
+set, `baseFee` has to be not greater than a base fee of any transaction in the
+component.
+`parallelTxsComponent` must consist of zero or more `executionStages`. Every
+stage in `executionStages` must contain at least one non-empty
+`DependentTxCluster`. Thus, if a transaction set contains no Soroban
+transactions, then it must contain a `parallelTxsComponent` with 0
+`executionStages`.
#### Effective fee computation
-The effective fee computation works in the same fashion as for `TxSetComponent` component (described in [CAP-0042](./cap-0042.md#effective-fee-computation)).
+The effective fee computation works in the same fashion as for `TxSetComponent`
+component (described in [CAP-0042](./cap-0042.md#effective-fee-computation)).
-The effective fee for a given transaction in `ParallelTxsComponent` is computed in the following way:
+The effective fee for a given transaction in `ParallelTxsComponent` is computed
+in the following way:
- * If `baseFee` is empty, then transaction's effective fee is its fee bid.
- * If `baseFee` is set, then transaction's effective fee is set to the value of `baseFee`.
+- If `baseFee` is empty, then transaction's effective fee is its fee bid.
+- If `baseFee` is set, then transaction's effective fee is set to the value of
+ `baseFee`.
#### Soroban phase validation
-As per already existing specification, every phase in a transaction set has to be valid in order for it to get applied to the ledger. Beyond the basic XDR representation format described above, the full validation specification for the parallel Soroban phase is defined as follows:
-
-- When set, `baseFee` must not be higher than the base fee of any transaction in the phase
-- Every `DependentTxCluster` has to have transactions sorted by their SHA-256 hashes in increasing order
-- Every stage in `executionStages` must have its clusters sorted by SHA-256 hash of the first transaction in the cluster (recall that empty clusters are not allowed)
-- Stages must be sorted by SHA-256 hash of the first transaction in the first cluster of the stage (recall that empty stages are not allowed)
-- The number of clusters per stage must not exceed the value of `ledgerMaxDependentTxClusters` network configuration setting
-- Within a stage, footprint conflicts between the dependent transaction clusters are not allowed. The footprint conflict between two transactions is defined as follows: if a transaction A has a ledger key in its read-write footprint, and another transaction B has the same ledger key in its footprint (either read-only, or read-write), then they're conflicting. A pair of clusters is considered to have a footprint conflict in case if any pair of transactions A from the first cluster and B from the second cluster have a conflict.
-- The sum of 'sequential' instructions for the phase must not exceed the value of `ledgerMaxInstructions` network configuration setting. 'Sequential' instructions are defined as follows:
- - 'Sequential' instructions per cluster are defined as the sum of `sorobanData.resources.instructions` across all the transactions in the cluster
- - 'Sequential' instructions per stage are defined as the max of 'sequential' instructions across its clusters
- - 'Sequential' instructions for the phase are defined as the sum of 'sequential' instructions across all the stages in the phase
-
+As per already existing specification, every phase in a transaction set has to
+be valid in order for it to get applied to the ledger. Beyond the basic XDR
+representation format described above, the full validation specification for
+the parallel Soroban phase is defined as follows:
+
+- When set, `baseFee` must not be higher than the base fee of any transaction
+ in the phase
+- Every `DependentTxCluster` has to have transactions sorted by their SHA-256
+ hashes in increasing order
+- Every stage in `executionStages` must have its clusters sorted by SHA-256
+ hash of the first transaction in the cluster (recall that empty clusters are
+ not allowed)
+- Stages must be sorted by SHA-256 hash of the first transaction in the first
+ cluster of the stage (recall that empty stages are not allowed)
+- The number of clusters per stage must not exceed the value of
+ `ledgerMaxDependentTxClusters` network configuration setting
+- Within a stage, footprint conflicts between the dependent transaction
+ clusters are not allowed. The footprint conflict between two transactions is
+ defined as follows: if a transaction A has a ledger key in its read-write
+ footprint, and another transaction B has the same ledger key in its footprint
+ (either read-only, or read-write), then they're conflicting. A pair of
+ clusters is considered to have a footprint conflict if any pair of
+ transactions A from the first cluster and B from the second cluster have a
+ conflict.
+- The sum of 'sequential' instructions for the phase must not exceed the value
+ of `ledgerMaxInstructions` network configuration setting. 'Sequential'
+ instructions are defined as follows:
+ - 'Sequential' instructions per cluster are defined as the sum of
+ `sorobanData.resources.instructions` across all the transactions in the
+ cluster
+ - 'Sequential' instructions per stage are defined as the max of 'sequential'
+ instructions across its clusters
+ - 'Sequential' instructions for the phase are defined as the sum of
+ 'sequential' instructions across all the stages in the phase
#### Soroban phase application
-Even though some of the transactions in the new phase are data-independent by design, there is still a protocol defined order of application of the transactions. That is, if transactions are applied, the results must be 'as if' they have been applied in that order. The application order is defined as follows:
+Even though some of the transactions in the new phase are data-independent by
+design, there is still a protocol defined order of application of the
+transactions. That is, if transactions are applied, the results must be 'as if'
+they have been applied in that order. The application order is defined as
+follows:
+
+- _unchanged_ We define the transaction 'application comparison key' as
+ `sha256(transaction_envelope_xdr) ^ sha256(transaction_set_xdr)`, where `^`
+ is bit-XOR operation. Note, that this is the transaction application order
+ key used by the current protocol as well.
+- First, transactions in every cluster are sorted for apply using the
+ comparison key
+- Then clusters in every stage are sorted using the comparison key of the first
+ transaction in the sorted cluster
+- Then all the stages in phase are sorted using the comparison key of the first
+ transaction of the first sorted cluster in the sorted stage
+- The application order is defined as the order of transactions visited during
+ iterating the stages in the sort order defined above, i.e. it can be defined
+ by the following pseudo-code:
-- _unchanged_ We define the transaction 'application comparison key' as `sha256(transaction_envelope_xdr) ^ sha256(transaction_set_xdr)`, where `^` is bit-XOR operation. Note, that this is the transaction application order key used by the current protocol as well.
-- First, transactions in every cluster are sorted for apply using the comparison key
-- Then clusters in every stage are sorted using the comparison key of the first transaction in the sorted cluster
-- Then all the stages in phase are sorted using the comparison key of the first transaction of the first sorted cluster in the sorted stage
-- The application order is defined as the order of transactions visited during iterating the stages in the sort order defined above, i.e. it can be defined by the following pseudo-code:
```
for each stage in sorted_stages:
for each sorted_cluster in stage:
@@ -289,147 +382,282 @@ for each stage in sorted_stages:
application_order.append(transaction)
```
-During the application the fees must be withdrawn and sequence numbers must be bumped sequentially, in application order defined above and before performing any operations inside the transactions. This is consistent with the current protocol behavior.
+During the application the fees must be withdrawn and sequence numbers must be
+bumped sequentially, in application order defined above and before performing
+any operations inside the transactions. This is consistent with the current
+protocol behavior.
-Then all the operations must be applied and the changes in *non-TTL* ledger entries should be 'as if' the operations from every transaction have been applied sequentially. The semantics for processing the TTL changes and computing the rent fees are changed with this CAP. The changes are described in the following section.
+Then all the operations must be applied and the changes in _non-TTL_ ledger
+entries should be 'as if' the operations from every transaction have been
+applied sequentially. The semantics for processing the TTL changes and
+computing the rent fees are changed with this CAP. The changes are described in
+the following section.
#### TTL update semantics
-Currently the changes to the entry TTL are immediately applied to the ledger state, in the same way as changes to any other ledger entry. However, the protocol allows increasing the TTL even for the entries that have been declared in the footprint as read-only. Thus if we wanted to preserve the current behavior, all the read-only footprint entries would potentially be read-write, and thus only transactions with non-overlapping footprints could belong to different clusters. This is not optimal for parallelization, given that common entries such as Stellar Asset Contract instances and some popular Wasms can be referenced by multiple transactions, but they're always or almost only read-only.
-
-Thus, this CAP proposes a new way for applying the TTLs changes that makes changes to read-only entries' TTL changes commutative. The proposed TTL update algorithm ensures that the TTL changes are only observable in the following cases:
-- When the corresponding entry is being modified in the transaction (i.e. is a part of the read-write footprint)
+Currently the changes to the entry TTL are immediately applied to the ledger
+state, in the same way as changes to any other ledger entry. However, the
+protocol allows increasing the TTL even for the entries that have been declared
+in the footprint as read-only. Thus if we wanted to preserve the current
+behavior, all the read-only footprint entries would potentially be read-write,
+and thus only transactions with non-overlapping footprints could belong to
+different clusters. This is not optimal for parallelization, given that common
+entries such as Stellar Asset Contract instances and some popular Wasms can be
+referenced by multiple transactions, but they're always or almost always
+read-only.
+
+Thus, this CAP proposes a new way for applying the TTLs changes that makes
+changes to read-only entries' TTL changes commutative. The proposed TTL update
+algorithm ensures that the TTL changes are only observable in the following
+cases:
+
+- When the corresponding entry is being modified in the transaction (i.e. is a
+ part of the read-write footprint)
- After executing all the transactions
The detailed new algorithm is as follows:
-- After fees have been withdrawn, but before applying any operations:
- - Snapshot the initial `liveUntilLedger` of every entry within the stage's footprint and store it in 'current TTL map' keyed by the TTL entry keys
- - Also prepare an empty 'read-only update map' that contains the results values of `liveUntilLedgers`.
+- After fees have been withdrawn, but before applying any operations:
+ - Snapshot the initial `liveUntilLedger` of every entry within the stage's
+ footprint and store it in 'current TTL map' keyed by the TTL entry keys
+  - Also prepare an empty 'read-only update map' that contains the resulting
+ values of `liveUntilLedgers`.
- When applying a Soroban operation:
- - Observe the relevant TTL changes thus far. For every key in the read-write footprint:
- - If 'read-only update map' contains the key, set the value in 'current TTL map' to the value from 'read-only update map'
- - Note, that this can't decrease the current TTL of the entry
- - Remove the key from 'read-only update map'
+ - Observe the relevant TTL changes thus far. For every key in the read-write
+ footprint:
+ - If 'read-only update map' contains the key, set the value in 'current TTL
+ map' to the value from 'read-only update map'
+ - Note, that this can't decrease the current TTL of the entry
+ - Remove the key from 'read-only update map'
- Before applying the actual operation logic:
- - When reading the initial state of the entries, use 'current TTL map' to determine the `liveUntilLedger` of every entry in the footprint
- - If an operation succeeds and ledger changes caused by the operation must be materialized:
- - For every key in the read-only footprint that had its `liveUntilLedger` updated:
- - Set the the value corresponding to the key in 'read-only update map' to `max(updated_liveUntilLedger, existing_value_liveUntilLedger)` (if key is missing from the map, just use `updated_liveUntilLedger`)
+ - When reading the initial state of the entries, use 'current TTL map' to
+ determine the `liveUntilLedger` of every entry in the footprint
+ - If an operation succeeds and ledger changes caused by the operation must be
+ materialized:
+ - For every key in the read-only footprint that had its `liveUntilLedger`
+ updated:
+      - Set the value corresponding to the key in 'read-only update map' to
+ `max(updated_liveUntilLedger, existing_value_liveUntilLedger)` (if key
+ is missing from the map, just use `updated_liveUntilLedger`)
- For every key in the read-write footprint (unconditionally):
- - If the entry still exists, set the value corresponding to the key in 'current TTL map' to the new `liveUntilLedger` of the entry
- - If the entry was deleted, also removed the key from 'current TTL map'
- - _unchanged_ The effective rent fee for the operation is still computed based on the initial state of the entry at operation start and the end state of the entry.
- - Notice, however, that the initial TTL of the entry itself is defined differently now.
+ - If the entry still exists, set the value corresponding to the key in
+ 'current TTL map' to the new `liveUntilLedger` of the entry
+    - If the entry was deleted, also remove the key from 'current TTL map'
+ - _unchanged_ The effective rent fee for the operation is still computed
+ based on the initial state of the entry at operation start and the end
+ state of the entry.
+ - Notice, however, that the initial TTL of the entry itself is defined
+ differently now.
- After applying all the Soroban operations in the cluster:
- - Iterate all the remaining keys in the 'read-only update map' and update the `liveUntilLedger` for the corresponding entries with the value from the map
+ - Iterate all the remaining keys in the 'read-only update map' and update the
+ `liveUntilLedger` for the corresponding entries with the value from the map
- Note, that this can't decrease the current TTL of the entry
-Notice, that while the algorithm is defined in terms of a single mutable 'current TTL map', in implementation it can actually be separated into 2 parts: a fully immutable part for the entries that are always read-only within a stage, and a mutable part that may only be modified while applying transactions in a single cluster. Thus when applying transactions in parallel, no locking is necessary to update the maps during the operation application. The entries might move between maps only in-between the stages.
+Notice, that while the algorithm is defined in terms of a single mutable
+'current TTL map', in implementation it can actually be separated into 2 parts:
+a fully immutable part for the entries that are always read-only within a
+stage, and a mutable part that may only be modified while applying transactions
+in a single cluster. Thus when applying transactions in parallel, no locking is
+necessary to update the maps during the operation application. The entries
+might move between maps only in-between the stages.
##### TTL ledger change semantics
-Transaction meta for every Soroban transaction will contain a 'correct' TTL change from the perspective of that transaction. That means that the initial entry TTLs are what the transaction has observed before execution, the final TTLs are what the transaction has updated them too, and the rent fee corresponds to the TTL changes. However, if a meta consumer wants to track the TTL of the entry after applying _all_ the transactions in the ledger, they will need to never decrease the TTL of the entries they track. For example:
-
-- Transaction 1 extends the TTL for entry E by 1000 ledgers and sets it `liveUntilLedger` to `currentLedgerSeq + 1000`
-- Transaction 2 extends the TTL for entry E by 500 ledgers and sets it `liveUntilLedger` to `currentLedgerSeq + 500`
+Transaction meta for every Soroban transaction will contain a 'correct' TTL
+change from the perspective of that transaction. That means that the initial
+entry TTLs are what the transaction has observed before execution, the final
+TTLs are what the transaction has updated them to, and the rent fee
+corresponds to the TTL changes. However, if a meta consumer wants to track the
+TTL of the entry after applying _all_ the transactions in the ledger, they will
+need to never decrease the TTL of the entries they track. For example:
+
+- Transaction 1 extends the TTL for entry E by 1000 ledgers and sets its
+  `liveUntilLedger` to `currentLedgerSeq + 1000`
+- Transaction 2 extends the TTL for entry E by 500 ledgers and sets its
+  `liveUntilLedger` to `currentLedgerSeq + 500`
- The final `liveUntilLedger` for E is `currentLedgerSeq + 1000`
-The only way the TTL of an entry can ever go down after applying a ledger is if an entry has been deleted by one transaction and then recreated by another transaction. Thus the conceptual algorithm for tracking entry TTLs using the transaction meta is as follows:
+The only way the TTL of an entry can ever go down after applying a ledger is if
+an entry has been deleted by one transaction and then recreated by another
+transaction. Thus the conceptual algorithm for tracking entry TTLs using the
+transaction meta is as follows:
- Maintain a map from TTL keys to the corresponding `liveUntilLedger` values
- For every transaction in apply order:
- For every TTL entry update:
- - If TTL has been updated, set the value corresponding to the TTL key to `max(updated_liveUntilLedger, existing_value_liveUntilLedger)` (if key is missing from the map, just use `updated_liveUntilLedger`)
- - If a TTL entry has been removed, remove the corresponding key from the map
+ - If TTL has been updated, set the value corresponding to the TTL key to
+ `max(updated_liveUntilLedger, existing_value_liveUntilLedger)` (if key is
+ missing from the map, just use `updated_liveUntilLedger`)
+ - If a TTL entry has been removed, remove the corresponding key from the
+ map
#### Parallel application of operations
-The protocol only defines the application order and it cannot strictly define how to apply the operations, as long as the end result is equivalent. However, the phase structure and validation rules strongly suggest parallelization between the dependent transaction clusters. Specifically:
-
-- Within a single stage, every cluster is completely data-independent of every other cluster, and thus every cluster may be applied by a separate thread
-- After every thread in the stage has succeeded, the changed ledger entries should be passed to the following stage to be applied in parallel as well
-- While the protocol guarantees that there are no data conflicts between the clusters, it's also possible that clusters have 'sub-clusters' that are independent of each other and thus use a higher degree of parallelism than the protocol allows. This doesn't however have any protocol guarantees in terms of the degree of parallelization.
+The protocol only defines the application order and it cannot strictly define
+how to apply the operations, as long as the end result is equivalent. However,
+the phase structure and validation rules strongly suggest parallelization
+between the dependent transaction clusters. Specifically:
+
+- Within a single stage, every cluster is completely data-independent of every
+ other cluster, and thus every cluster may be applied by a separate thread
+- After every thread in the stage has succeeded, the changed ledger entries
+ should be passed to the following stage to be applied in parallel as well
+- While the protocol guarantees that there are no data conflicts between the
+ clusters, it's also possible that clusters have 'sub-clusters' that are
+ independent of each other and thus use a higher degree of parallelism than
+ the protocol allows. This doesn't however have any protocol guarantees in
+ terms of the degree of parallelization.
#### Move fee refund to after all Soroban transactions are applied.
-Currently, the fee refund is applied after every transaction. This CAP specifies a change to instead apply refunds after all Soroban transactions have been applied. The meta will also need to be updated. Currently, the Soroban fee refund currently exist in `txChangesAfter` under `TransactionMeta`. The refund will stop appearing in this field, and will instead be emitted through `postTxApplyFeeProcessing`, which is in the new `TransactionResultMetaV1` struct, which will be emitted by the new `LedgerCloseMetaV2` struct.
+Currently, the fee refund is applied after every transaction. This CAP
+specifies a change to instead apply refunds after all Soroban transactions have
+been applied. The meta will also need to be updated. Currently, the Soroban fee
+refund exists in `txChangesAfter` under `TransactionMeta`. The refund
+will stop appearing in this field, and will instead be emitted through
+`postTxApplyFeeProcessing`, which is in the new `TransactionResultMetaV1`
+struct, which will be emitted by the new `LedgerCloseMetaV2` struct.
#### Candidate value generation
-Just like today, this CAP does not specify the exact algorithm used to produce a valid value as to make it easier for implementations to compete on the quality of transaction sets.
+Just like today, this CAP does not specify the exact algorithm used to produce
+a valid value so as to make it easier for implementations to compete on the
+quality of transaction sets.
+
+Here is a sketch of a possible greedy algorithm that produces relatively
+efficient Soroban phase contents:
-Here is a sketch of a possible greedy algorithm that produces relatively efficient Soroban phase contents:
- Define a small number of stages S (1-4) to generate
-- For every stage maintain a set of the dependent transaction clusters (initially empty)
+- For every stage maintain a set of the dependent transaction clusters
+ (initially empty)
- For every transaction in the order of descending inclusion fee
- For every stage try to fit the transaction into stage:
- Determine the potential new contents of dependent transaction clusters:
- - Transaction that doesn't conflict with any other transaction forms a new, single-transaction cluster
- - Otherwise, merge the new transaction and all the clusters that have at least one conflict with a new transaction into a new cluster
- - Validate the potential new clusters: verify that it's possible to pack these clusters into `ledgerMaxDependentTxClusters` bins such that in every bin the total amount of instructions across all the transactions doesn't exceed `ledgerMaxInstructions / S` instructions.
- - If the new clusters are valid, store them in the stage and consider the transaction added, otherwise roll them back to the previous state
- - If a transaction doesn't fit into any stage, postpone it to the next ledger and trigger surge pricing
-- The output phase consists of the final packings of clusters in every stage into `ledgerMaxDependentTxClusters`
+ - Transaction that doesn't conflict with any other transaction forms a
+ new, single-transaction cluster
+ - Otherwise, merge the new transaction and all the clusters that have at
+ least one conflict with a new transaction into a new cluster
+ - Validate the potential new clusters: verify that it's possible to pack
+ these clusters into `ledgerMaxDependentTxClusters` bins such that in
+ every bin the total amount of instructions across all the transactions
+ doesn't exceed `ledgerMaxInstructions / S` instructions.
+ - If the new clusters are valid, store them in the stage and consider the
+ transaction added, otherwise roll them back to the previous state
+ - If a transaction doesn't fit into any stage, postpone it to the next ledger
+ and trigger surge pricing
+- The output phase consists of the final packings of clusters in every stage
+ into `ledgerMaxDependentTxClusters`
#### Additional disk read bytes limit enforcement
-This CAP adds a step before any transactions are applied that preloads all entries specified across all transaction footprints within a given stage. Due to this, we'll need to add a read bytes limit check at this step (in addition to the existing check during transaction apply). In protocol 22, the read bytes limit is enforced only once during transaction application, so this CAP introduces an additional opportunity to fail the read limit validation. An example of the difference can be demonstrated as follows - You have two transactions, the first deletes an existing entry, and the second reads from the key of the entry that will be deleted. In protocol 22, the delete would be applied, and then the second transaction would read nothing during application, so no additional read bytes would be consumed for that transaction. In protocol 23, both transactions will read the entry before application during the preload step, and will count the size of the entry against the read bytes limit.
+This CAP adds a step before any transactions are applied that preloads all
+entries specified across all transaction footprints within a given stage. Due
+to this, we'll need to add a read bytes limit check at this step (in addition
+to the existing check during transaction apply). In protocol 22, the read bytes
+limit is enforced only once during transaction application, so this CAP
+introduces an additional opportunity to fail the read limit validation. An
+example of the difference can be demonstrated as follows - You have two
+transactions, the first deletes an existing entry, and the second reads from
+the key of the entry that will be deleted. In protocol 22, the delete would be
+applied, and then the second transaction would read nothing during application,
+so no additional read bytes would be consumed for that transaction. In protocol
+23, both transactions will read the entry before application during the preload
+step, and will count the size of the entry against the read bytes limit.
## Design Rationale
-This proposal tries to strike the balance between the parallelization efficiency, nomination flexibility and simplicity of transaction set validation and application.
+This proposal tries to strike the balance between the parallelization
+efficiency, nomination flexibility and simplicity of transaction set validation
+and application.
Parallelization efficiency:
-- Sequential stages provide a simple scheduling solution for parallelizing the sets of transactions with a moderate amount of conflicts:
- - For example, if a single 'hot' entry that multiple transactions read (e.g. an instance entry of a popular token contract) is updated, then the update can be scheduled in one stage, and all the reads may be efficiently parallelized in the remaining stages.
-- On the other hand, the small, limited number of stages minimizes the amount of synchronization necessary - only a couple barriers are necessary
-- Application time is limited in terms of modelled instructions, so the real apply time should be relatively predictable
-- TTL reconciliation logic allows parallelizing TTL changes which a prevalent for otherwise read-only entries
+- Sequential stages provide a simple scheduling solution for parallelizing the
+ sets of transactions with a moderate amount of conflicts:
+ - For example, if a single 'hot' entry that multiple transactions read (e.g.
+ an instance entry of a popular token contract) is updated, then the update
+ can be scheduled in one stage, and all the reads may be efficiently
+ parallelized in the remaining stages.
+- On the other hand, the small, limited number of stages minimizes the amount
+ of synchronization necessary - only a couple barriers are necessary
+- Application time is limited in terms of modelled instructions, so the real
+ apply time should be relatively predictable
+- TTL reconciliation logic allows parallelizing TTL changes which are prevalent
+ for otherwise read-only entries
Flexibility:
-- The number and size of the stages is not fixed by the protocol, so it's possible to innovate on the nomination algorithms without protocol changes
-- The maximum number of clusters is a network setting, so increasing the maximum parallelization degree supported by the network is a change that only requires a single vote and no code changes
+- The number and size of the stages is not fixed by the protocol, so it's
+ possible to innovate on the nomination algorithms without protocol changes
+- The maximum number of clusters is a network setting, so increasing the
+ maximum parallelization degree supported by the network is a change that only
+ requires a single vote and no code changes
- Protocol is also not prescriptive on the exact application logic
-
- Simplicity:
-
-- Storing the dependency clusters and stages inside transaction sets makes the validation fast and straightforward
+
+Simplicity:
+
+- Storing the dependency clusters and stages inside transaction sets makes the
+ validation fast and straightforward
- Minimal amount of new settings is necessary
### Move fee refund to after all Soroban transactions are applied.
-Currently, the fee refund is applied after every transaction. Doing the same thing in parallel soroban is more difficult because the fee source account does not need to be in the transaction footprint. If we wanted to apply the refund after each transaction, the fee source account would need to be added implicitly to the footprint's read write set so we can handle conflicts where the source account is also used in the XLM Stellar Asset Contract. Instead of adding this data dependency for a minor edge case, the CAP just applies the refunds after all Soroban transactions have been applied.
+Currently, the fee refund is applied after every transaction. Doing the same
+thing in parallel soroban is more difficult because the fee source account does
+not need to be in the transaction footprint. If we wanted to apply the refund
+after each transaction, the fee source account would need to be added
+implicitly to the footprint's read write set so we can handle conflicts where
+the source account is also used in the XLM Stellar Asset Contract. Instead of
+adding this data dependency for a minor edge case, the CAP just applies the
+refunds after all Soroban transactions have been applied.
## Protocol Upgrade Transition
-As soon as this CAP becomes active, validators will produce the new (v1) format of Soroban phase in transaction sets in SCP.
+As soon as this CAP becomes active, validators will produce the new (v1) format
+of Soroban phase in transaction sets in SCP.
-The configuration entry of type `CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0` will be created with `ledgerMaxDependentTxClusters` set to the initial value of 1 during the protocol transition.
+The configuration entry of type `CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0`
+will be created with `ledgerMaxDependentTxClusters` set to the initial value of
+1 during the protocol transition.
-This way the only immediate change realized in the protocol will be the TTL update semantics change. Any protocol-supported parallel execution logic won't be active until `ledgerMaxDependentTxClusters` is increased by a separate SLP and a validator vote.
+This way the only immediate change realized in the protocol will be the TTL
+update semantics change. Any protocol-supported parallel execution logic won't
+be active until `ledgerMaxDependentTxClusters` is increased by a separate SLP
+and a validator vote.
### Backwards Incompatibilities
-As transaction sets are transmitted over the network, the overlay network will have to be updated to support the new format.
+As transaction sets are transmitted over the network, the overlay network will
+have to be updated to support the new format.
-The XDR changes introduced by the CAP should have minimal ecosystem effect, since only the internal SCP data structures are changed and these are not typically observed outside of the consensus protocol.
+The XDR changes introduced by the CAP should have minimal ecosystem effect,
+since only the internal SCP data structures are changed and these are not
+typically observed outside of the consensus protocol.
### Resource Utilization
-As `ledgerMaxDependentTxClusters` setting grows, the demand to the hardware running the Stellar Core will grow as well, specifically multi-core processors will become necessary for nodes to stay in sync with the network in case if most of the network bandwidth is being used. Specifically, at least `ledgerMaxDependentTxClusters + 1` cores would be necessary.
+As `ledgerMaxDependentTxClusters` setting grows, the demand to the hardware
+running the Stellar Core will grow as well, specifically multi-core processors
+will become necessary for nodes to stay in sync with the network in case
+most of the network bandwidth is being used. Specifically, at least
+`ledgerMaxDependentTxClusters + 1` cores would be necessary.
## Security Concerns
-The changes on the security front are minimal as transaction semantics are not changed.
+The changes on the security front are minimal as transaction semantics are not
+changed.
-As usual, the validators have some degree of control over the contents of the transaction sets, especially in terms of simply censoring the transactions that are being included. However, this CAP doesn't meaningfully affect this risk in either direction.
+As usual, the validators have some degree of control over the contents of the
+transaction sets, especially in terms of simply censoring the transactions that
+are being included. However, this CAP doesn't meaningfully affect this risk in
+either direction.
## Future work
-Improvements to the mem-pooling, nomination and application logic can be implemented as a followup to the initial CAP implementation. These are not protocol changes and may be a part of any Core release (before or after the protocol upgrade).
+Improvements to the mem-pooling, nomination and application logic can be
+implemented as a followup to the initial CAP implementation. These are not
+protocol changes and may be a part of any Core release (before or after the
+protocol upgrade).
## Test Cases
diff --git a/core/cap-0064.md b/core/cap-0064.md
index 3558d608c..7283c5e1a 100644
--- a/core/cap-0064.md
+++ b/core/cap-0064.md
@@ -15,7 +15,8 @@ Protocol version: TBD
## Simple Summary
-This CAP adds a new type of Soroban authorization payload that allows for signing the transaction memo.
+This CAP adds a new type of Soroban authorization payload that allows for
+signing the transaction memo.
## Working Group
@@ -23,27 +24,51 @@ As specified in the Preamble.
## Motivation
-Standalone Soroban authorization payloads (those with `SOROBAN_CREDENTIALS_ADDRESS`, see [CAP-46-11](./cap-0046-11.md#authorization-payload-in-transaction) for details) for smart contract transactions are designed to be completely independent of the transaction envelope they belong to. This is done in order to support arbitrary custom multi-party authorization schemes that smart contracts may want to implement. As long as an authorization payload has been properly signed, it can be attached to any valid transaction.
-
-However, in some cases there may be a dependency between the transaction envelope and the inner authorization payload that currently can not be signed for. Specifically, transaction memos are often used to identify 'sub-destinations' within the actual destination account. This approach is often used by exchanges for attributing the incoming token transfers to specific exchange users.
-
-Since custom accounts (smart wallets) require using `SOROBAN_CREDENTIALS_ADDRESS` in order to sign any operations, it is currently not possible to e.g. sent token from a custom account balance to an exchange account. This is a common procedure for off-ramp or CEX-based trading and thus not supporting it significantly hinders the adoption of custom accounts and requires unsafe workarounds (like using an intermediary classic Stellar account for performing the payment). Custom account adoption itself provides a way of improving the overall user experience of Stellar network.
+Standalone Soroban authorization payloads (those with
+`SOROBAN_CREDENTIALS_ADDRESS`, see
+[CAP-46-11](./cap-0046-11.md#authorization-payload-in-transaction) for details)
+for smart contract transactions are designed to be completely independent of
+the transaction envelope they belong to. This is done in order to support
+arbitrary custom multi-party authorization schemes that smart contracts may
+want to implement. As long as an authorization payload has been properly
+signed, it can be attached to any valid transaction.
+
+However, in some cases there may be a dependency between the transaction
+envelope and the inner authorization payload that currently can not be signed
+for. Specifically, transaction memos are often used to identify
+'sub-destinations' within the actual destination account. This approach is
+often used by exchanges for attributing the incoming token transfers to
+specific exchange users.
+
+Since custom accounts (smart wallets) require using
+`SOROBAN_CREDENTIALS_ADDRESS` in order to sign any operations, it is currently
+not possible to e.g. send tokens from a custom account balance to an exchange
+account. This is a common procedure for off-ramp or CEX-based trading and thus
+not supporting it significantly hinders the adoption of custom accounts and
+requires unsafe workarounds (like using an intermediary classic Stellar account
+for performing the payment). Custom account adoption itself provides a way of
+improving the overall user experience of Stellar network.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects to create highly usable products.
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products.
## Abstract
-This CAP introduces a new version of Soroban address credentials that allows users to specify a memo that the transaction must have in order for credentials to be valid for authorization. The new version of credentials doesn't supersede the existing one, i.e. protocol will support both versions.
+This CAP introduces a new version of Soroban address credentials that allows
+users to specify a memo that the transaction must have in order for credentials
+to be valid for authorization. The new version of credentials doesn't supersede
+the existing one, i.e. protocol will support both versions.
## Specification
### XDR Changes
-This patch of XDR changes is based on the XDR files in commit `a41b2db15ea34a9f9da5326b996bb8a7ceb5740f` of stellar-xdr.
+This patch of XDR changes is based on the XDR files in commit
+`a41b2db15ea34a9f9da5326b996bb8a7ceb5740f` of stellar-xdr.
```diff mddiffcheck.ignore=true
Stellar-ledger-entries.x | 3 ++-
@@ -62,7 +87,7 @@ index 5bf4f9d..f0bf9ea 100644
+ ENVELOPE_TYPE_SOROBAN_AUTHORIZATION = 9,
+ ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_V2 = 10
};
-
+
enum BucketListType
diff --git a/Stellar-transaction.x b/Stellar-transaction.x
index 7d32481..763531c 100644
@@ -71,7 +96,7 @@ index 7d32481..763531c 100644
@@ -569,10 +569,22 @@ struct SorobanAddressCredentials
SCVal signature;
};
-
+
+struct SorobanAddressCredentialsV2
+{
+ ExtensionPoint ext;
@@ -90,7 +115,7 @@ index 7d32481..763531c 100644
+ SOROBAN_CREDENTIALS_ADDRESS = 1,
+ SOROBAN_CREDENTIALS_ADDRESS_V2 = 2
};
-
+
union SorobanCredentials switch (SorobanCredentialsType type)
@@ -581,6 +593,8 @@ case SOROBAN_CREDENTIALS_SOURCE_ACCOUNT:
void;
@@ -99,7 +124,7 @@ index 7d32481..763531c 100644
+case SOROBAN_CREDENTIALS_ADDRESS_V2:
+ SorobanAddressCredentialsV2 addressV2;
};
-
+
/* Unit of authorization data for Soroban.
@@ -729,6 +743,17 @@ case ENVELOPE_TYPE_SOROBAN_AUTHORIZATION:
uint32 signatureExpirationLedger;
@@ -117,58 +142,98 @@ index 7d32481..763531c 100644
+ SorobanAuthorizedInvocation invocation;
+ } sorobanAuthorizationV2;
};
-
+
enum MemoType
---
+--
```
### Semantics
#### `SOROBAN_CREDENTIALS_ADDRESS_V2` credentials
-A new type of of address credentials is introduced for `SorobanAuthorizationEntry`: `SOROBAN_CREDENTIALS_ADDRESS_V2` of type `SorobanAddressCredentialsV2`. The semantics of the new credentials is identical to the semantics of `SOROBAN_CREDENTIALS_ADDRESS` defined by [CAP-46-11](./cap-0046-11.md#authorization-payload-in-transaction) with the following exceptions:
-
-- During the authorization process the value of `txMemo` field is validated against the memo of the transaction being executed. In case of a mismatch, the authorization is considered to have failed.
-- SHA-256 hash of `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_V2` envelope must be signed instead of `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION` envelope used for `SOROBAN_CREDENTIALS_ADDRESS` signatures. The envelope is built using the respectively named fields from `SorobanAddressCredentialsV2`, and the target network id that the authorization has to be used for.
-- No-op extension point has been added for the future extensions to both the authorization entry and the envelope
+A new type of address credentials is introduced for
+`SorobanAuthorizationEntry`: `SOROBAN_CREDENTIALS_ADDRESS_V2` of type
+`SorobanAddressCredentialsV2`. The semantics of the new credentials is
+identical to the semantics of `SOROBAN_CREDENTIALS_ADDRESS` defined by
+[CAP-46-11](./cap-0046-11.md#authorization-payload-in-transaction) with the
+following exceptions:
+
+- During the authorization process the value of `txMemo` field is validated
+ against the memo of the transaction being executed. In case of a mismatch,
+ the authorization is considered to have failed.
+- SHA-256 hash of `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_V2` envelope must be
+ signed instead of `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION` envelope used for
+ `SOROBAN_CREDENTIALS_ADDRESS` signatures. The envelope is built using the
+ respectively named fields from `SorobanAddressCredentialsV2`, and the target
+ network id that the authorization has to be used for.
+- No-op extension point has been added for the future extensions to both the
+  authorization entry and the envelope.
#### `SOROBAN_CREDENTIALS_ADDRESS` credentials remain supported
-The first version of the credentials will still be supported by the protocol. It can be considered to be semantically equivalent to `SOROBAN_CREDENTIALS_ADDRESS_V2` with `txMemo` set to `MEMO_NONE` (i.e. it may only pass authorization check when the transaction memo is `MEMO_NONE`).
+The first version of the credentials will still be supported by the protocol.
+It can be considered to be semantically equivalent to
+`SOROBAN_CREDENTIALS_ADDRESS_V2` with `txMemo` set to `MEMO_NONE` (i.e. it may
+only pass authorization check when the transaction memo is `MEMO_NONE`).
#### Muxed accounts restrictions
-In order to avoid confusion between memos and muxed source accounts, transactions that contain both of:
+In order to avoid confusion between memos and muxed source accounts,
+transactions that contain both of:
- Muxed transaction source account and/or muxed operation source account
-- At least one Soroban authorization entry with credentials that are not set to `SOROBAN_CREDENTIALS_SOURCE_ACCOUNT`
+- At least one Soroban authorization entry with credentials that are not set to
+ `SOROBAN_CREDENTIALS_SOURCE_ACCOUNT`
are considered invalid and thus they won't be ever included into ledger.
## Design Rationale
-The design is simply the minimal necessary set of changes that allows for signing transaction memos. We had to come up with the new XDR structures because the initial versions did not have any extension points. In order to simplify potential future changes, the extension point has been introduced as well.
+The design is simply the minimal necessary set of changes that allows for
+signing transaction memos. We had to come up with the new XDR structures
+because the initial versions did not have any extension points. In order to
+simplify potential future changes, the extension point has been introduced as
+well.
### Muxed accounts are not supported
-Muxed accounts are an alternative to transaction memos and serve a similar purpose of defining off-chain sub-accounts for the actual accounts. However, they don't align with the change proposed by this CAP due to the following reasons:
+Muxed accounts are an alternative to transaction memos and serve a similar
+purpose of defining off-chain sub-accounts for the actual accounts. However,
+they don't align with the change proposed by this CAP due to the following
+reasons:
-- When a muxed account is a source account of a transaction or operation, it defines the source (not the destination) of a transfer and thus the main motivational use case of supporting exchange transfers is not covered by that.
-- Muxed account can not be a destination of any Soroban operation because `ScAddress` doesn't allow expressing muxed accounts.
+- When a muxed account is a source account of a transaction or operation, it
+ defines the source (not the destination) of a transfer and thus the main
+ motivational use case of supporting exchange transfers is not covered by
+ that.
+- Muxed account can not be a destination of any Soroban operation because
+ `ScAddress` doesn't allow expressing muxed accounts.
-We may consider supporting muxed accounts more holistically as an address type in the future CAPs, but that falls out of scope of this particular change.
+We may consider supporting muxed accounts more holistically as an address type
+in the future CAPs, but that falls out of scope of this particular change.
### No additional transaction fields are supported
-There are other transaction fields (currently preconditions and Soroban resources data), that could in theory also be signed as a part of Soroban authorization payloads. However, unlike memos, these fields are normally not used for any sort of identification and provide functionality that can already be replicated in Soroban to some degree. For example, all the Soroban authorization entries must have signature expiration ledger and there is no particular need for also tying the signature to the preconditions of the external transaction. Soroban resources can't be signed at all, as they may depend on the signature itself.
+There are other transaction fields (currently preconditions and Soroban
+resources data), that could in theory also be signed as a part of Soroban
+authorization payloads. However, unlike memos, these fields are normally not
+used for any sort of identification and provide functionality that can already
+be replicated in Soroban to some degree. For example, all the Soroban
+authorization entries must have a signature expiration ledger and there is no
+particular need for also tying the signature to the preconditions of the
+external transaction. Soroban resources can't be signed at all, as they may
+depend on the signature itself.
## Protocol Upgrade Transition
-
-As soon as this CAP becomes active, validators will start accepting the transactions with `SOROBAN_CREDENTIALS_ADDRESS_V2` credentials and performing the corresponding authorization checks.
+
+As soon as this CAP becomes active, validators will start accepting the
+transactions with `SOROBAN_CREDENTIALS_ADDRESS_V2` credentials and performing
+the corresponding authorization checks.
### Backwards Incompatibilities
-The old credentials format is still accepted and thus no backwards incompatibilities are expected.
+The old credentials format is still accepted and thus no backwards
+incompatibilities are expected.
### Resource Utilization
@@ -180,7 +245,8 @@ This CAP doesn't raise any new security concerns.
## Future work
-Support for muxed accounts in Soroban or Soroban-specific multiplexed addresses might be done in the future, given the real use cases and demand.
+Support for muxed accounts in Soroban or Soroban-specific multiplexed addresses
+might be done in the future, given the real use cases and demand.
## Test Cases
diff --git a/core/cap-0065.md b/core/cap-0065.md
index b21e9d844..e8956135c 100644
--- a/core/cap-0065.md
+++ b/core/cap-0065.md
@@ -14,95 +14,199 @@ Protocol version: 23
```
## Simple Summary
-Maintain a fully-populated cache of all live WASM modules -- parsed, validated and translated -- at all times, across all ledgers, eliminating parsing and validation costs from the WASM VM instantiation process.
+
+Maintain a fully-populated cache of all live WASM modules -- parsed, validated
+and translated -- at all times, across all ledgers, eliminating parsing and
+validation costs from the WASM VM instantiation process.
## Working Group
As specified in the Preamble.
## Motivation
-The existing protocol caches ready-to-run (parsed, validated and translated) WASM modules only within a single transaction; the cache is reset after each transaction. This means that any module that is used in multiple transactions in a transaction set, or multiple transaction sets over multiple ledgers, is repeatedly (and wastefully) re-parsed, re-validated and re-translated to its executable form.
+
+The existing protocol caches ready-to-run (parsed, validated and translated)
+WASM modules only within a single transaction; the cache is reset after each
+transaction. This means that any module that is used in multiple transactions
+in a transaction set, or multiple transaction sets over multiple ledgers, is
+repeatedly (and wastefully) re-parsed, re-validated and re-translated to its
+executable form.
### Goals Alignment
-This change is aligned with the goal of lowering the cost and increasing the scale of the network.
+This change is aligned with the goal of lowering the cost and increasing the
+scale of the network.
## Abstract
-It would be ideal to cache modules in memory across transactions or even across ledgers. However, until recently there has been no consensus on how to attribute and charge fees to cover the cost of populating a longer-lived cache. The design of the module cache has therefore been limited to operating within a single transaction (with a clearly defined fee, and account that pays the fee).
-With the protocol changes proposed in [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md) the soroban state of the ledger is effectively partitioned into:
+It would be ideal to cache modules in memory across transactions or even across
+ledgers. However, until recently there has been no consensus on how to
+attribute and charge fees to cover the cost of populating a longer-lived cache.
+The design of the module cache has therefore been limited to operating within a
+single transaction (with a clearly defined fee, and account that pays the fee).
- 1. a smaller, size-bounded, live (directly-usable), in-memory component (the "live bucketlist")
- 2. a larger, unbounded, archived (not-directly-usable), on-disk component (the "hot archive bucketlist")
+With the protocol changes proposed in [CAP-0062](./cap-0062.md) and
+[CAP-0066](./cap-0066.md) the soroban state of the ledger is effectively
+partitioned into:
-This is part of the longer term state archival plan, and users can control the movement of ledger entries between #1 and #2 by means of TTLs and restore operations.
+1. a smaller, size-bounded, live (directly-usable), in-memory component (the
+ "live bucketlist")
+2. a larger, unbounded, archived (not-directly-usable), on-disk component (the
+ "hot archive bucketlist")
-Because of this new organization, a new caching strategy is possible: the module cache can restrict its population to contracts in the live, in-memory component of the ledger. The cost of populating the cache is then _only_ charged when uploading or restoring `ContractCodeEntry` ledger entries to the in-memory component.
+This is part of the longer term state archival plan, and users can control the
+movement of ledger entries between #1 and #2 by means of TTLs and restore
+operations.
-Moreover this cost may even be subsumed entirely into the IO resource fees associated with the upload or restore operation; correct calibration will be required to determine if a residual cost has to be charged for the parsing, validation and translation, separate from the IO costs.
+Because of this new organization, a new caching strategy is possible: the
+module cache can restrict its population to contracts in the live, in-memory
+component of the ledger. The cost of populating the cache is then _only_
+charged when uploading or restoring `ContractCodeEntry` ledger entries to the
+in-memory component.
+
+Moreover this cost may even be subsumed entirely into the IO resource fees
+associated with the upload or restore operation; correct calibration will be
+required to determine if a residual cost has to be charged for the parsing,
+validation and translation, separate from the IO costs.
## Specification
-This change depends on the adoption of [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md).
-The API between stellar-core and the soroban host is modified to allow the module cache to be reused across multiple soroban hosts.
+This change depends on the adoption of [CAP-0062](./cap-0062.md) and
+[CAP-0066](./cap-0066.md).
+
+The API between stellar-core and the soroban host is modified to allow the
+module cache to be reused across multiple soroban hosts.
-When stellar-core starts up, it parses, validates and translates all WASM modules (a `ContractCodeEntry` ledger entry) in the live bucketlist and stores them in a single in-memory, reusable module cache.
+When stellar-core starts up, it parses, validates and translates all WASM
+modules (a `ContractCodeEntry` ledger entry) in the live bucketlist and stores
+them in a single in-memory, reusable module cache.
-When a `ContractCodeEntry` is uploaded or restored from the hot archive bucketlist, it is added to the reusable module cache _after_ stellar core finishes processing all transactions in the ledger containing the upload or restore operations.
+When a `ContractCodeEntry` is uploaded or restored from the hot archive
+bucketlist, it is added to the reusable module cache _after_ stellar core
+finishes processing all transactions in the ledger containing the upload or
+restore operations.
-When a `ContractCodeEntry` is evicted from the live bucketlist to the hot archive bucketlist, it is removed from the reusable module cache, again _after_ all transactions in the associated ledger is processed.
+When a `ContractCodeEntry` is evicted from the live bucketlist to the hot
+archive bucketlist, it is removed from the reusable module cache, again _after_
+all transactions in the associated ledger are processed.
-No other changes to the protocol occur. Users should observe no behavioural differences, just lower fees due to the elimination of parsing, validation and translation costs during transaction execution. The "cache" will have a 100% hit rate since transactions can only invoke contracts currently in the live bucketlist. The cache therefore no longer behaves like a cache so much as simply a ready-to-run representation of the content of the live bucketlist.
+No other changes to the protocol occur. Users should observe no behavioural
+differences, just lower fees due to the elimination of parsing, validation and
+translation costs during transaction execution. The "cache" will have a 100%
+hit rate since transactions can only invoke contracts currently in the live
+bucketlist. The cache therefore no longer behaves like a cache so much as
+simply a ready-to-run representation of the content of the live bucketlist.
### Fees and edge cases
-Uploads will still be charged the cost of a full parse of the contract, using the coarse worst-case cost model reserved for unknown contracts. This is the same cost model used during uploads before this CAP.
+Uploads will still be charged the cost of a full parse of the contract, using
+the coarse worst-case cost model reserved for unknown contracts. This is the
+same cost model used during uploads before this CAP.
-Executing a contract _in the same ledger_ that the contract is first uploaded will incur a full parse/validate/translate cost just before the execution, using the refined cost model of _known_ contracts that was introduced in [CAP-0054](./cap-0054.md). In other words the low cost instantiation that is new to this CAP (omitting the parse/validate/translate costs) will _only_ apply to ledgers _after_ the ledger in which a contract is uploaded.
+Executing a contract _in the same ledger_ that the contract is first uploaded
+will incur a full parse/validate/translate cost just before the execution,
+using the refined cost model of _known_ contracts that was introduced in
+[CAP-0054](./cap-0054.md). In other words the low cost instantiation that is
+new to this CAP (omitting the parse/validate/translate costs) will _only_ apply
+to ledgers _after_ the ledger in which a contract is uploaded.
-Evictions are performed before uploads or restorations, at the end of ledger close. In other words if the same entry is both evicted and uploaded in a single ledger, the state of the module cache after the ledger will contain the entry.
+Evictions are performed before uploads or restorations, at the end of ledger
+close. In other words if the same entry is both evicted and uploaded in a
+single ledger, the state of the module cache after the ledger will contain the
+entry.
### XDR changes
+
None.
### Semantics
-The only semantic change is that fees are reduced.
-
-## Design Rationale
-The design is very straightforward: there is a cache that stores work that only needs to be done once, so that it can be reused. This cache already exists in soroban, it is just cleared after each transaction. The change is to stop clearing it, increasing its effectiveness (to 100%).
-
-The only real "rationale" is in explaining why we _didn't_ have a persistent (cross-ledger) cache from the beginning of soroban's design. This restriction resulted from our inability to design a suitable way to charge fees to users for cache residency (and cache misses), since the fees charged would depend on the history of cache population and eviction events, which would not be visible or predictable to users.
-It was frequently suggested that the network could charge fees to everyone "as if" they were experiencing a cache miss, and then refund the fee on a cache hit. Unfortunately the dominant cost being saved by caching is CPU time, which is not a refundable resource: there is a fixed amount of time _per transaction set_ (based on the close time target) and that time has to be allocated to transactions as part of admission control, when building transaction sets, after which the time is "consumed" whether _actually_ spent on-CPU while executing, or simply having excluded other transactions from the transaction set when building it. Various other strategies were proposed but none (until now) seemed satisfactory.
+The only semantic change is that fees are reduced.
-With [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md), the population and eviction events are visible and controlled by the user (using rent bumps) and so we can move ahead with the simple, obvious design of "not clearing the cache after each transaction". Moreover since the design in CAP-0062 places _all_ live ledger data in memory, we can ensure the cache is actually always full of every possibly-invocable contract: a 100% hit rate.
-
-While there is a small risk that the cumulative size of the live bucketlist will exceed available memory, this is controlled by a network setting that the validators can vote to raise or lower as necessary to balance the needs of the network against the cost to node operators of validators with more memory.
+## Design Rationale
-In practice we expect the live bucketlist to remain comfortably within the memory of any reasonably modern computer: ledger entries range from hundreds of bytes to low-kilobytes, most servers used as validators have tens of gigabytes of memory, and the great majority of ledger entries are _dormant_ and can therefore be safely evicted to the disk without causing any inconvenience to users. Should they ever become active, the user only has to issue a restore operation.
+The design is very straightforward: there is a cache that stores work that only
+needs to be done once, so that it can be reused. This cache already exists in
+soroban, it is just cleared after each transaction. The change is to stop
+clearing it, increasing its effectiveness (to 100%).
+
+The only real "rationale" is in explaining why we _didn't_ have a persistent
+(cross-ledger) cache from the beginning of soroban's design. This restriction
+resulted from our inability to design a suitable way to charge fees to users
+for cache residency (and cache misses), since the fees charged would depend on
+the history of cache population and eviction events, which would not be visible
+or predictable to users.
+
+It was frequently suggested that the network could charge fees to everyone "as
+if" they were experiencing a cache miss, and then refund the fee on a cache
+hit. Unfortunately the dominant cost being saved by caching is CPU time, which
+is not a refundable resource: there is a fixed amount of time _per transaction
+set_ (based on the close time target) and that time has to be allocated to
+transactions as part of admission control, when building transaction sets,
+after which the time is "consumed" whether _actually_ spent on-CPU while
+executing, or simply having excluded other transactions from the transaction
+set when building it. Various other strategies were proposed but none (until
+now) seemed satisfactory.
+
+With [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md), the population
+and eviction events are visible and controlled by the user (using rent bumps)
+and so we can move ahead with the simple, obvious design of "not clearing the
+cache after each transaction". Moreover since the design in CAP-0062 places
+_all_ live ledger data in memory, we can ensure the cache is actually always
+full of every possibly-invocable contract: a 100% hit rate.
+
+While there is a small risk that the cumulative size of the live bucketlist
+will exceed available memory, this is controlled by a network setting that the
+validators can vote to raise or lower as necessary to balance the needs of the
+network against the cost to node operators of validators with more memory.
+
+In practice we expect the live bucketlist to remain comfortably within the
+memory of any reasonably modern computer: ledger entries range from hundreds of
+bytes to low-kilobytes, most servers used as validators have tens of gigabytes
+of memory, and the great majority of ledger entries are _dormant_ and can
+therefore be safely evicted to the disk without causing any inconvenience to
+users. Should they ever become active, the user only has to issue a restore
+operation.
## Protocol Upgrade Transition
-Besides the changes in [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md), upgrading to the changes in this CAP should impose no additional requirements for ecosystem users. CPU costs of transaction execution, and therefore fees charged, will decrease. That should be the only effect.
+
+Besides the changes in [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md),
+upgrading to the changes in this CAP should impose no additional requirements
+for ecosystem users. CPU costs of transaction execution, and therefore fees
+charged, will decrease. That should be the only effect.
### Backwards Incompatibilities
-Besides the changes in [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md), this CAP represents no additional backward incompatibilities.
+
+Besides the changes in [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md),
+this CAP represents no additional backward incompatibilities.
### Resource Utilization
+
This CAP will substantially reduce transaction execution costs.
## Security Concerns
-There is a slightly elevated hypothetical risk of service denial due to memory exhaustion, beyond [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md): for example if a user determines a way to allocate a disproportionate amount of memory in a cached module such that the ledger entry byte-size limits of the live bucketlist imposed by the previous CAPs do not adequately limit the memory consumption of those same ledger entries _when translated_.
-We are not presently aware of any method of attack that fits this hypothetical risk, but it is not out of the realm of possibility. We will continue to perform testing, code inspection and monitoring with an eye to this hypothetical risk.
+There is a slightly elevated hypothetical risk of service denial due to memory
+exhaustion, beyond [CAP-0062](./cap-0062.md) and [CAP-0066](./cap-0066.md): for
+example if a user determines a way to allocate a disproportionate amount of
+memory in a cached module such that the ledger entry byte-size limits of the
+live bucketlist imposed by the previous CAPs do not adequately limit the memory
+consumption of those same ledger entries _when translated_.
+
+We are not presently aware of any method of attack that fits this hypothetical
+risk, but it is not out of the realm of possibility. We will continue to
+perform testing, code inspection and monitoring with an eye to this
+hypothetical risk.
## Test Cases
+
TBD.
## Implementation
-There are preliminary implementations ready, pending further testing, calibration and measurement, integration and review:
-
- - The soroban side is in https://github.com/stellar/rs-soroban-env/pull/1506
+There are preliminary implementations ready, pending further testing,
+calibration and measurement, integration and review:
- - The stellar-core side is in https://github.com/stellar/stellar-core/pull/4621
+- The soroban side is in https://github.com/stellar/rs-soroban-env/pull/1506
+- The stellar-core side is in https://github.com/stellar/stellar-core/pull/4621
diff --git a/core/cap-0066.md b/core/cap-0066.md
index 9ccaef4b0..6d7cc3d95 100644
--- a/core/cap-0066.md
+++ b/core/cap-0066.md
@@ -13,8 +13,9 @@ Protocol version: 23
## Simple Summary
-This proposal introduces a new resource type for Soroban reads, distinguishing between in-memory and
-disk reads. This also proposes automatic restoration for archived entries via `InvokeHostFunctionOp`.
+This proposal introduces a new resource type for Soroban reads, distinguishing
+between in-memory and disk reads. This also proposes automatic restoration for
+archived entries via `InvokeHostFunctionOp`.
## Working Group
@@ -22,30 +23,39 @@ As specified in the Preamble.
## Motivation
-By distinguishing disk and in-memory reads, this proposal allows for significant increase in Soroban
-read limits. This distinction also allows for safe automatic entry restoration, significantly improving UX.
+By distinguishing disk and in-memory reads, this proposal allows for
+significant increase in Soroban read limits. This distinction also allows for
+safe automatic entry restoration, significantly improving UX.
### Goals Alignment
-This change is aligned with the goal of lowering the cost and increasing the scale of the network.
+This change is aligned with the goal of lowering the cost and increasing the
+scale of the network.
## Abstract
-[CAP-0062](cap-0062.md) introduces partial State Archival, where evicted and live Soroban state is stored in separate databases.
-This separation allows live Soroban state to be efficiently cached entirely in memory, removing disc reads entirely for all
-live Soroban state. Todays eviction scan can then delete entries from both the live BucketList disk and in-memory cache efficiently.
+[CAP-0062](cap-0062.md) introduces partial State Archival, where evicted and
+live Soroban state is stored in separate databases. This separation allows live
+Soroban state to be efficiently cached entirely in memory, removing disk reads
+entirely for all live Soroban state. Today's eviction scan can then delete
+entries from both the live BucketList disk and in-memory cache efficiently.
However, classic state and evicted entries are still subject to disk reads.
-This CAP introduces a new resource type for reads, distinguishing between disk reads and in-memory reads. By making this
-distinction at the protocol level, read limits for Soroban data can greatly increase. This also opens the door for other
-optimizations, such as a complete module cache for all live contracts.
-
-Additionally, this CAP introduces automatic restoration via `InvokeHostFunctionOp`, where any archived key present in the
-footprint is automatically restored. Initially in protocol 20, the state archival design was not solidified enough to
-enable automatic restoration, so an explicit restore operation was required. While this was not technically required in
-protocol 20, it was important for contract interfaces to reason properly about restoration such that when full state
-archival was introduce, it would not break preexisting deployments. Given that the full State Archival design has been
-mostly finalized in [CAP-0057](cap-0057.md), it is now appropriate to introduce automatic restoration.
+This CAP introduces a new resource type for reads, distinguishing between disk
+reads and in-memory reads. By making this distinction at the protocol level,
+read limits for Soroban data can greatly increase. This also opens the door for
+other optimizations, such as a complete module cache for all live contracts.
+
+Additionally, this CAP introduces automatic restoration via
+`InvokeHostFunctionOp`, where any archived key present in the footprint is
+automatically restored. Initially in protocol 20, the state archival design was
+not solidified enough to enable automatic restoration, so an explicit restore
+operation was required. While this was not technically required in protocol 20,
+it was important for contract interfaces to reason properly about restoration
+such that when full state archival was introduced, it would not break
+preexisting deployments. Given that the full State Archival design has been
+mostly finalized in [CAP-0057](cap-0057.md), it is now appropriate to introduce
+automatic restoration.
## Specification
@@ -70,7 +80,7 @@ mostly finalized in [CAP-0057](cap-0057.md), it is now appropriate to introduce
uint32 ledgerMaxWriteLedgerEntries;
// Maximum number of bytes that can be written per ledger
uint32 ledgerMaxWriteBytes;
-
+
- // Maximum number of ledger entry read operations per transaction
- uint32 txMaxReadLedgerEntries;
- // Maximum number of bytes that can be read per transaction
@@ -83,21 +93,21 @@ mostly finalized in [CAP-0057](cap-0057.md), it is now appropriate to introduce
uint32 txMaxWriteLedgerEntries;
// Maximum number of bytes that can be written per transaction
uint32 txMaxWriteBytes;
-
+
- int64 feeReadLedgerEntry; // Fee per ledger entry read
- int64 feeWriteLedgerEntry; // Fee per ledger entry write
+ int64 feeDiskReadLedgerEntry; // Fee per disk ledger entry read
+ int64 feeDiskRead1KB; // Fee for reading 1KB disk
+ int64 feeWriteLedgerEntry; // Fee per ledger entry write
-
+
- int64 feeRead1KB; // Fee for reading 1KB
-
+
// The following parameters determine the write fee per 1KB.
- // Write fee grows linearly until bucket list reaches this size
- int64 bucketListTargetSizeBytes;
- // Fee per 1KB write when the bucket list is empty
- int64 writeFee1KBBucketListLow;
-- // Fee per 1KB write when the bucket list has reached `bucketListTargetSizeBytes`
+- // Fee per 1KB write when the bucket list has reached `bucketListTargetSizeBytes`
- int64 writeFee1KBBucketListHigh;
- // Write fee multiplier for any additional data past the first `bucketListTargetSizeBytes`
- uint32 bucketListWriteFeeGrowthFactor;
@@ -119,7 +129,7 @@ mostly finalized in [CAP-0057](cap-0057.md), it is now appropriate to introduce
+ // Fee per 1 KB write
+ uint32 feeWrite1KB;
};
-
+
// Historical data (pushed to core archives) settings for contracts.
@@ -302,7 +312,8 @@ enum ConfigSettingID
CONFIG_SETTING_STATE_ARCHIVAL = 10,
@@ -129,7 +139,7 @@ mostly finalized in [CAP-0057](cap-0057.md), it is now appropriate to introduce
+ CONFIG_SETTING_EVICTION_ITERATOR = 13,
+ CONFIG_SETTING_CONTRACT_LEDGER_COST_EXT_V0 = 14
};
-
+
union ConfigSettingEntry switch (ConfigSettingID configSettingID)
@@ -335,5 +346,7 @@ case CONFIG_SETTING_BUCKETLIST_SIZE_WINDOW:
uint64 bucketListSizeWindow<>;
@@ -145,19 +155,19 @@ diff --git a/Stellar-ledger.x b/Stellar-ledger.x
@@ -528,12 +528,11 @@ struct LedgerCloseMetaV1
// systems calculating storage fees correctly.
uint64 totalByteSizeOfBucketList;
-
+
- // Temp keys that are being evicted at this ledger.
- LedgerKey evictedTemporaryLedgerKeys<>;
+ // TTL and data/code keys that have been evicted at this ledger.
+ LedgerKey evictedKeys<>;
-
+
- // Archived restorable ledger entries that are being
- // evicted at this ledger.
- LedgerEntry evictedPersistentLedgerEntries<>;
+ // Maintained for backwards compatibility, should never be populated.
+ LedgerEntry unused<>;
};
-
+
union LedgerCloseMeta switch (int v)
diff --git a/Stellar-transaction.x b/Stellar-transaction.x
index 7d32481..f966640 100644
@@ -165,8 +175,8 @@ index 7d32481..f966640 100644
+++ b/Stellar-transaction.x
@@ -882,16 +882,30 @@ struct SorobanResources
// The maximum number of instructions this transaction can use
- uint32 instructions;
-
+ uint32 instructions;
+
- // The maximum number of bytes this transaction can read from ledger
- uint32 readBytes;
+ // The maximum number of bytes this transaction can read from disk backed entries
@@ -174,7 +184,7 @@ index 7d32481..f966640 100644
// The maximum number of bytes this transaction can write to ledger
uint32 writeBytes;
};
-
+
+struct SorobanResourcesExtV0
+{
+ // Vector of indices representing what Soroban
@@ -196,14 +206,15 @@ index 7d32481..f966640 100644
+ } ext;
SorobanResources resources;
// Amount of the transaction `fee` allocated to the Soroban resource fees.
- // The fraction of `resourceFee` corresponding to `resources` specified
+ // The fraction of `resourceFee` corresponding to `resources` specified
```
### Semantics
#### In-memory vs Disk Read Resources
-Read resources are now separated between memory backed and disk backed state as follows:
+Read resources are now separated between memory backed and disk backed state as
+follows:
- in-memory entries:
- Live Soroban entries
@@ -211,42 +222,54 @@ Read resources are now separated between memory backed and disk backed state as
- Archived Soroban entries
- Classic entries
-In-memory entries are subject to the corresponding in-memory limits, but charge no entry or byte based
-read fees. On disk entries are subject to the corresponding limits and also charge entry and byte based
-read fees.
+In-memory entries are subject to the corresponding in-memory limits, but charge
+no entry or byte based read fees. On disk entries are subject to the
+corresponding limits and also charge entry and byte based read fees.
#### `sorobanLiveStateTargetSizeBytes`
-`bucketListTargetSizeBytes` has been renamed to `sorobanLiveStateTargetSizeBytes`. Instead of tracking the current
-amount of disk being used by the network, this value has changed semantically to track the amount of in-memory
-(i.e. live soroban) state.
-
-The value of `sorobanLiveStateTargetSizeBytes` is the total size of live Soroban state in the ledger, in bytes.
-Previously, `bucketListTargetSizeBytes` was based on the total size of the BucketList. In addition to no longer
-counting classic state, `sorobanLiveStateTargetSizeBytes` is now based on the "live soroban state" instead of on-disk
-size of entries. The "live soroban state" corresponds to live `CONTRACT_DATA` entries, `TTL` entries for both `CONTRACT_DATA`
-and `CONTRACT_CODE`, and instantiated contract code. Archived and deleted state is not included, even if a `BucketEntry`
-for the archived or deleted state exists in the Live BucketList.
-
-`sorobanLiveStateTargetSizeBytes` is the sum of the size of each live `CONTRACT_DATA` entry, `CONTRACT_CODE` entry, and
-corresponding `TTL` entry, where the size of each entry is calculated as follows:
- - `CONTRACT_DATA`: XDR serialized size of `CONTRACT_DATA` LedgerEntry.
- - `TTL`: XDR serialized size of `TTL` LedgerEntry.
- - `CONTRACT_CODE`: The memory cost of the given code based on the module's `ContractCodeCostInputs`, as calculated by the cost model.
- Note that this is usually much larger than the `CONTRACT_CODE` LedgerEntry XDR size.
+`bucketListTargetSizeBytes` has been renamed to
+`sorobanLiveStateTargetSizeBytes`. Instead of tracking the current amount of
+disk being used by the network, this value has changed semantically to track
+the amount of in-memory (i.e. live soroban) state.
+
+The value of `sorobanLiveStateTargetSizeBytes` is the total size of live
+Soroban state in the ledger, in bytes. Previously, `bucketListTargetSizeBytes`
+was based on the total size of the BucketList. In addition to no longer
+counting classic state, `sorobanLiveStateTargetSizeBytes` is now based on the
+"live soroban state" instead of on-disk size of entries. The "live soroban
+state" corresponds to live `CONTRACT_DATA` entries, `TTL` entries for both
+`CONTRACT_DATA` and `CONTRACT_CODE`, and instantiated contract code. Archived
+and deleted state is not included, even if a `BucketEntry` for the archived or
+deleted state exists in the Live BucketList.
+
+`sorobanLiveStateTargetSizeBytes` is the sum of the size of each live
+`CONTRACT_DATA` entry, `CONTRACT_CODE` entry, and corresponding `TTL` entry,
+where the size of each entry is calculated as follows:
+
+- `CONTRACT_DATA`: XDR serialized size of `CONTRACT_DATA` LedgerEntry.
+- `TTL`: XDR serialized size of `TTL` LedgerEntry.
+- `CONTRACT_CODE`: The memory cost of the given code based on the module's
+ `ContractCodeCostInputs`, as calculated by the cost model. Note that this is
+ usually much larger than the `CONTRACT_CODE` LedgerEntry XDR size.
#### Write fee computation changes
-Previously, a dynamic write fee was used in order to limit the amount of disk space being used by the network. With
-State Archival, the size of disk usage is no longer as significant of a concern. However, the size of
-in-memory state must be bounded to prevent OOM DOS attacks. To accomplish this, we maintain a dynamic rent fee to
-limit the amount of in-memory state on the ledger at a given time.
+Previously, a dynamic write fee was used in order to limit the amount of disk
+space being used by the network. With State Archival, the size of disk usage is
+no longer as significant of a concern. However, the size of in-memory state
+must be bounded to prevent OOM DOS attacks. To accomplish this, we maintain a
+dynamic rent fee to limit the amount of in-memory state on the ledger at a
+given time.
-Since `sorobanLiveStateTargetSizeBytes` tracks in-memory state instead of on-disk state, it no longer makes sense
-to charge dynamic write fees based off of `sorobanLiveStateTargetSizeBytes`, but we still need to charge a fee for
-the cost of the write operation itself. To accomplish this, we introduce a constant per-KB write fee.
+Since `sorobanLiveStateTargetSizeBytes` tracks in-memory state instead of
+on-disk state, it no longer makes sense to charge dynamic write fees based off
+of `sorobanLiveStateTargetSizeBytes`, but we still need to charge a fee for the
+cost of the write operation itself. To accomplish this, we introduce a constant
+per-KB write fee.
-Soroban and classic state are both charged the flat per-KB write fee. The transaction write fee is computed as follows:
+Soroban and classic state are both charged the flat per-KB write fee. The
+transaction write fee is computed as follows:
```
write_fee = ceil(write_bytes * feeWrite1KB / 1024)
@@ -254,10 +277,11 @@ write_fee = ceil(write_bytes * feeWrite1KB / 1024)
#### Rent fee computation changes
-Intuitively, the rent fee now represents the cost of reserving memory in the ledger state, so it should be dynamic wrt
-`sorobanLiveStateTargetSizeBytes`. The `Rent_fee` and `rent_fee_per_entry_change` calculations from
-[CAP-46-07](cap-0046-07.md#rent-fee) remain unchanged, with `rent_fee_for_size_and_ledgers` now being computed
-as follows:
+Intuitively, the rent fee now represents the cost of reserving memory in the
+ledger state, so it should be dynamic wrt `sorobanLiveStateTargetSizeBytes`.
+The `Rent_fee` and `rent_fee_per_entry_change` calculations from
+[CAP-46-07](cap-0046-07.md#rent-fee) remain unchanged, with
+`rent_fee_for_size_and_ledgers` now being computed as follows:
Renting S bytes of ledger space for the period of L ledgers:
@@ -292,28 +316,53 @@ rent_fee_per_1kb(s) = max(MINIMUM_RENT_FEE_PER_1KB,
(s-sorobanStateTargetSizeBytes)/sorobanStateTargetSizeBytes)
```
-Note this is the same from the `write_fee_per_1kb` calculation in [CAP-46-07](cap-0046-07.md#ledger-data). The only difference is that
-`rent_fee_for_size_and_ledgers` does not apply a discount to `rent_fee_per_1kb` (other than the contract data type discount),
-which was the case for `write_fee_per_1kb`.
+Note this is the same as the `write_fee_per_1kb` calculation in
+[CAP-46-07](cap-0046-07.md#ledger-data). The only difference is that
+`rent_fee_for_size_and_ledgers` does not apply a discount to `rent_fee_per_1kb`
+(other than the contract data type discount), which was the case for
+`write_fee_per_1kb`.
#### `CONTRACT_CODE` size calculation for limits and fees
`CONTRACT_CODE` entries have two "sizes" that are relevant to fees and limits:
- - "Disk Size": The size of the `CONTRACT_CODE` `LedgerEntry` XDR.
- - "In-memory Size": The total memory footprint for a contract code entry. We store every `CONTRACT_CODE` in memory in two different places: once in the Soroban ledger entry cache and once in the global Soroban module cache. Thus in-memory entry size is a sum of two components:
- - The memory size of the Wasm *module* corresponding to the Wasm stored in the `CONTRACT_CODE` entry, as calculated by the cost model. More specifically, the in-memory size of a given `CONTRACT_CODE` is defined as the metered memory consumption of parsing the Wasm code into a Wasm VM module.
- - The size of the `CONTRACT_CODE` `LedgerEntry` XDR.
-
-Disk size is consistent between protocol upgrades, as the `CONTRACT_CODE` bytes themselves are immutable. However, the
-in-memory size is dependent on the cost model. For this reason, limits can not be based on in-memory size. If in-memory size
-was used for limits, a contract could be "bricked" by any upgrade that changes the cost model such that the in-memory size
-increases.
-For this reason, `contractMaxSizeBytes` limit enforcement will continue to be based on disk size. But in-memory size will be used for accounting `CONTRACT_CODE` entries towards the Soroban state size, as well as the rent computations.
-
-Since the in-memory size may be significantly larger than the disk size (up to 40x as of protocol 23) the rent fees will be adjusted with the protocol upgrade in order to keep the effective rent fees for contract code close to those in protocol 22 (since these fees seemed to be reasonable enough for DOS protection). Moreover, in order to reduce the gap in rent fees between code and data entries a discount factor of `3` will be applied to the final rent fees. This is the highest possible discount the protocol can implement without creating an incentive to store the contract data in code entries - the minimum ratio between the disk and in-memory size for any Wasm is greater than 3 as of protocol 23 (the Wasm module size is at least 2x of the actual Wasm size, and another 1x Wasm size is charged for the entry cache).
-
-To summarize, here is the breakdown of the sizes used for `CONTRACT_CODE` in different contexts:
+- "Disk Size": The size of the `CONTRACT_CODE` `LedgerEntry` XDR.
+- "In-memory Size": The total memory footprint for a contract code entry. We
+ store every `CONTRACT_CODE` in memory in two different places: once in the
+ Soroban ledger entry cache and once in the global Soroban module cache. Thus
+ in-memory entry size is a sum of two components:
+ - The memory size of the Wasm _module_ corresponding to the Wasm stored in
+ the `CONTRACT_CODE` entry, as calculated by the cost model. More
+ specifically, the in-memory size of a given `CONTRACT_CODE` is defined as
+ the metered memory consumption of parsing the Wasm code into a Wasm VM
+ module.
+ - The size of the `CONTRACT_CODE` `LedgerEntry` XDR.
+
+Disk size is consistent between protocol upgrades, as the `CONTRACT_CODE` bytes
+themselves are immutable. However, the in-memory size is dependent on the cost
+model. For this reason, limits can not be based on in-memory size. If in-memory
+size was used for limits, a contract could be "bricked" by any upgrade that
+changes the cost model such that the in-memory size increases.
+
+For this reason, `contractMaxSizeBytes` limit enforcement will continue to be
+based on disk size. But in-memory size will be used for accounting
+`CONTRACT_CODE` entries towards the Soroban state size, as well as the rent
+computations.
+
+Since the in-memory size may be significantly larger than the disk size (up to
+40x as of protocol 23) the rent fees will be adjusted with the protocol upgrade
+in order to keep the effective rent fees for contract code close to those in
+protocol 22 (since these fees seemed to be reasonable enough for DOS
+protection). Moreover, in order to reduce the gap in rent fees between code and
+data entries a discount factor of `3` will be applied to the final rent fees.
+This is the highest possible discount the protocol can implement without
+creating an incentive to store the contract data in code entries - the minimum
+ratio between the disk and in-memory size for any Wasm is greater than 3 as of
+protocol 23 (the Wasm module size is at least 2x of the actual Wasm size, and
+another 1x Wasm size is charged for the entry cache).
+
+To summarize, here is the breakdown of the sizes used for `CONTRACT_CODE` in
+different contexts:
- Maximum contract code size enforcement - disk size
- Disk read bytes limit and fee - disk size
@@ -321,42 +370,73 @@ To summarize, here is the breakdown of the sizes used for `CONTRACT_CODE` in dif
- Soroban state size accounting - in-memory size
- Rent fee - in-memory size, with an additional `3` times discount
-While instantiated contract code is significantly larger than it's on disk size, this is not true for cached contract data.
-For this reason, rent fees for `CONTRACT_DATA` will continue to be based on the serialized XDR disk size.
+While instantiated contract code is significantly larger than its on-disk
+size, this is not true for cached contract data. For this reason, rent fees for
+`CONTRACT_DATA` will continue to be based on the serialized XDR disk size.
#### Initial Network Config Settings
-Initially, all disk read limits should match exactly the limits of reads today. Limits must not
-decrease from current values in order to prevent bricking existing contracts. In the worst case, a contract can read
-up to `txMaxReadLedgerEntries - 1` classic entries today (all classic entries + SAC instance). Current limits have been
-carefully measured assuming disk access only, so the addition of in-memory state should not negatively affect the network
-in any way should we maintain these values via disk read limits.
-
-The new config setting `txMaxFootprintEntries` sets the limit on the total number of entries in the transaction footprint, i.e. the number of entries in both read-only and read-write footprints. The footprint size limits are not required wrt transaction execution time, as in-memory reads are very inexpensive and disk reads and writes are limited by the separate settings. For this reason, no respective ledger wide read limit is necessary,
-and no byte based read limit is necessary for in-memory reads. However, the size of transaction footprints
-does impact the cost and complexity of assembling transactions sets and potentially maintaining the mempool, as with implementation [CAP-63](./CAP-63) Core will need to verify the presence of the conflicts in transaction footprints. Thus a tx footprint entry limit is introduced to ensure efficient transaction set
-construction.
-
-Note, that there is also a 'soft' limit on the maximum size of footprint per transaction that is caused by the transaction size limit. But coupling the limits in this fashion is risky - for example, network might need to increase the minimal transaction size in order to accommodate larger contracts, but the simultaneous increase in footprint entry count might not be desired.
-
-When the protocol is upgraded to version 23 the initial values of the new configuration settings will be set to the following values:
-- `txMaxFootprintEntries` will be set to the current value of `txMaxDiskReadEntries` (renamed from `txMaxReadLedgerEntries`) in order to prevent breakages of any existing contracts
-- `feeWrite1KB` will be set to `3'500` stroops, which is roughly 2x of the current Soroban read fee per 1 KB. Note, that this fee is decoupled from the rent write fee now.
+Initially, all disk read limits should match exactly the limits of reads today.
+Limits must not decrease from current values in order to prevent bricking
+existing contracts. In the worst case, a contract can read up to
+`txMaxReadLedgerEntries - 1` classic entries today (all classic entries + SAC
+instance). Current limits have been carefully measured assuming disk access
+only, so the addition of in-memory state should not negatively affect the
+network in any way should we maintain these values via disk read limits.
+
+The new config setting `txMaxFootprintEntries` sets the limit on the total
+number of entries in the transaction footprint, i.e. the number of entries in
+both read-only and read-write footprints. The footprint size limits are not
+required wrt transaction execution time, as in-memory reads are very
+inexpensive and disk reads and writes are limited by the separate settings. For
+this reason, no respective ledger wide read limit is necessary, and no byte
+based read limit is necessary for in-memory reads. However, the size of
+transaction footprints does impact the cost and complexity of assembling
+transactions sets and potentially maintaining the mempool, as with
+implementation [CAP-0063](cap-0063.md) Core will need to verify the presence of the
+conflicts in transaction footprints. Thus a tx footprint entry limit is
+introduced to ensure efficient transaction set construction.
+
+Note, that there is also a 'soft' limit on the maximum size of footprint per
+transaction that is caused by the transaction size limit. But coupling the
+limits in this fashion is risky - for example, the network might need to increase
+the minimal transaction size in order to accommodate larger contracts, but the
+simultaneous increase in footprint entry count might not be desired.
+
+When the protocol is upgraded to version 23 the initial values of the new
+configuration settings will be set to the following values:
+
+- `txMaxFootprintEntries` will be set to the current value of
+ `txMaxDiskReadEntries` (renamed from `txMaxReadLedgerEntries`) in order to
+ prevent breakages of any existing contracts
+- `feeWrite1KB` will be set to `3'500` stroops, which is roughly 2x of the
+ current Soroban read fee per 1 KB. Note, that this fee is decoupled from the
+ rent write fee now.
#### Rent-related network settings update
-Since Soroban live state will be used for the rent write fee computation, the fee-related configuration will be updated. The goal of the update is to minimize the immediate effective rent cost increase for the contract code entries due to using their in-memory size for the rent fee computation.
+Since Soroban live state will be used for the rent write fee computation, the
+fee-related configuration will be updated. The goal of the update is to
+minimize the immediate effective rent cost increase for the contract code
+entries due to using their in-memory size for the rent fee computation.
Specifically, the fees are tuned using the following input constraints:
-- The current in-memory state size during the protocol upgrade is projected to be 1 GB
-- If a contract code entry size is increased 40x, the effective rent cost at the current size should increase just 2x. Note, that given a 2x rent fee discount for the code entries, this means that the effective rent costs for the regular contract data entries will go down 10x compared to protocol 22.
-- The 'target' size of the in-memory state after which the fees grow steeply is set to 3 GB (3x of the current size)
+- The current in-memory state size during the protocol upgrade is projected to
+ be 1 GB
+- If a contract code entry size is increased 40x, the effective rent cost at
+ the current size should increase just 2x. Note, that given a 2x rent fee
+ discount for the code entries, this means that the effective rent costs for
+ the regular contract data entries will go down 10x compared to protocol 22.
+- The 'target' size of the in-memory state after which the fees grow steeply is
+ set to 3 GB (3x of the current size)
- The rent cost will stay constant until the state growth 2 times (to 2 GB)
- During the state size growth from 2 to 3 GB the rent fees will grow up to 10x
-- After 3 GB the rent fee growth factor of `5000` will be applied (no change from protocol 22)
+- After 3 GB the rent fee growth factor of `5000` will be applied (no change
+ from protocol 22)
-Given these constraints, the following rent fee settings will be updated to the following values during the protocol 23 upgrade:
+Given these constraints, the following rent fee settings will be updated to the
+following values during the protocol 23 upgrade:
- `sorobanStateTargetSizeBytes` set to `3'000'000'000` (3 GB)
- `rentFee1KBSorobanStateSizeLow` set to `-17'000`
@@ -366,44 +446,71 @@ Given these constraints, the following rent fee settings will be updated to the
#### Allow resource fees to exceed `uint32`
-Currently the maximum possible Stellar transaction fee is limited by the transaction `fee` field's is limited by its `uint32` type, i.e. it is `2^32 - 1` stroops or ~429 XLM. The combination of using the memory size for the contract code entries, as well as the rent changes above, makes it possible for a certain transaction, such as a Wasm upload or TTL extension to exceed that fee. Specifically, when the state size is close to its limit the fees will become high enough to make it impossible to upload a large Wasm (currently the protocol requires a payment of at least 120 days of rent for any entry creation).
-
-While this is an edge case scenario that should not happen under normal network utilization, its important to ensure that the network remains usable (albeit expensive) in case if state size grows too big too quickly.
-
-Instead of modifying the transaction data structure, protocol 23 just allows a fee bump transaction to have a negative inclusion fee for its inner transaction. Since fee bump transactions have `int64` field for the fee, they can cover any resource fee, and there is no real need for the inner transaction to be fully valid on its own. Thus, in order to submit a transaction with resource fee that exceeds `uint32` type limit, a user will need to create a fee bump transaction with an arbitrary inner transaction's `fee` and a fee bump transaction fee that is sufficient to cover the declared resource fee.
+Currently the maximum possible Stellar transaction fee is limited by the
+transaction `fee` field's `uint32` type, i.e. it is
+`2^32 - 1` stroops or ~429 XLM. The combination of using the memory size for
+the contract code entries, as well as the rent changes above, makes it possible
+for a certain transaction, such as a Wasm upload or TTL extension to exceed
+that fee. Specifically, when the state size is close to its limit the fees will
+become high enough to make it impossible to upload a large Wasm (currently the
+protocol requires a payment of at least 120 days of rent for any entry
+creation).
+
+While this is an edge case scenario that should not happen under normal network
+utilization, it's important to ensure that the network remains usable (albeit
+expensive) in case state size grows too big too quickly.
+
+Instead of modifying the transaction data structure, protocol 23 just allows a
+fee bump transaction to have a negative inclusion fee for its inner
+transaction. Since fee bump transactions have `int64` field for the fee, they
+can cover any resource fee, and there is no real need for the inner transaction
+to be fully valid on its own. Thus, in order to submit a transaction with
+resource fee that exceeds `uint32` type limit, a user will need to create a fee
+bump transaction with an arbitrary inner transaction's `fee` and a fee bump
+transaction fee that is sufficient to cover the declared resource fee.
#### `archivedEntries` Vector
-For the purposes of block creation, the `LedgerKey` alone is sufficient to distinguish between classic and Soroban data.
-However, a disk read would be required prior to tx application in order to determine if a Soroban entry was live or archived.
-This is necessary for determining what resources limits a Soroban key counts towards, but
-exposes a significant DOS angle. To prevent this attack, the footprint must statically declare which entries are live vs. archived.
-
-This is accomplished via the `archivedEntries` vector. If any archived Soroban entry is present in a TX's footprint,
-it must provide the `archivedEntries` vector containing the index of each archived key based on the ordering of the
-readWrite footprint. Because restores are a write operation, no archived key should be in the readOnly footprint. This vector
-should contain no classic key indexes, as classic keys are always considered disk reads.
+For the purposes of block creation, the `LedgerKey` alone is sufficient to
+distinguish between classic and Soroban data. However, a disk read would be
+required prior to tx application in order to determine if a Soroban entry was
+live or archived. This is necessary for determining what resource limits a
+Soroban key counts towards, but exposes a significant DOS angle. To prevent
+this attack, the footprint must statically declare which entries are live vs.
+archived.
+
+This is accomplished via the `archivedEntries` vector. If any archived Soroban
+entry is present in a TX's footprint, it must provide the `archivedEntries`
+vector containing the index of each archived key based on the ordering of the
+readWrite footprint. Because restores are a write operation, no archived key
+should be in the readOnly footprint. This vector should contain no classic key
+indexes, as classic keys are always considered disk reads.
#### Changes to `InvokeHostFunctionOp`
##### Automatic Entry Restoration
-Whenever `InvokeHostFunctionOp` is applied, any archived state is
-automatically restored prior to host function invocation. This restored state is then accessible to the host function.
+Whenever `InvokeHostFunctionOp` is applied, any archived state is automatically
+restored prior to host function invocation. This restored state is then
+accessible to the host function.
-Restored state is still subject to the same minimum rent and write fees that exist currently based on the final result of the
-invocation. Even if an entry is archived but not yet evicted such that it technically still
-exists in memory, it is still subject to the same limits and fees as disk based entries in order ot provide a simpler unified
-interface for downstream systems.
+Restored state is still subject to the same minimum rent and write fees that
+exist currently based on the final result of the invocation. Even if an entry
+is archived but not yet evicted such that it technically still exists in
+memory, it is still subject to the same limits and fees as disk based entries
+in order to provide a simpler unified interface for downstream systems.
-All archived keys must be declared in the `archivedEntries` vector via `SorobanResourcesExtV0`.
-If no archived entries are present in the footprint, `SorobanResourcesExtV0` may be omitted.
+All archived keys must be declared in the `archivedEntries` vector via
+`SorobanResourcesExtV0`. If no archived entries are present in the footprint,
+`SorobanResourcesExtV0` may be omitted.
##### Fees
-Given that restoration and invocation are happening atomically, rent fees will be charged based on the final
-state following invocation, not the initial restore state. For example, if an entry is autorestored, then deleted
-as part of the invocation, it will only pay disk read fees, not write fees or rent fees.
+Given that restoration and invocation are happening atomically, rent fees will
+be charged based on the final state following invocation, not the initial
+restore state. For example, if an entry is autorestored, then deleted as part
+of the invocation, it will only pay disk read fees, not write fees or rent
+fees.
Suppose we are auto restoring some entry e:
@@ -426,50 +533,61 @@ else:
##### Incorrect Entries in `archivedEntries` Vector
-If a key is not marked as archived, but the entry is actually archived, then the TX fails. If a Soroban key is marked as archived when
-it is not, the transaction does not fail. The entry is treated as if it is archived from a resource and fee perspective.
-This is appropriate, as on disk reads are more expensive than in-memory. So long as the TX pays the disk based fees,
-there is no issue with actually loading an in-memory entry.
-
-`archivedSorobanEntries` must only point to persistent Soroban entries. If `archivedSorobanEntries` contains an invalid index
-(an out of bounds index, classic entry, or temporary entry)
-the transaction is malformed. Indices in `archivedSorobanEntries` must be sorted. If the indices are not sorted, or
+If a key is not marked as archived, but the entry is actually archived, then
+the TX fails. If a Soroban key is marked as archived when it is not, the
+transaction does not fail. The entry is treated as if it is archived from a
+resource and fee perspective. This is appropriate, as on disk reads are more
+expensive than in-memory. So long as the TX pays the disk based fees, there is
+no issue with actually loading an in-memory entry.
+
+`archivedSorobanEntries` must only point to persistent Soroban entries. If
+`archivedSorobanEntries` contains an invalid index (an out of bounds index,
+classic entry, or temporary entry) the transaction is malformed. Indices in
+`archivedSorobanEntries` must be sorted. If the indices are not sorted, or
include a duplicate value, the transaction is malformed.
##### Failure Codes
-If an archived entry is included in the TX's footprint but is not specified via the `archivedEntries` vector, the
-tx will fail with the `INVOKE_HOST_FUNCTION_ENTRY_ARCHIVED` code. If there are insufficient resources for entry restoration,
-the tx will fail with the `INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED` code.
+If an archived entry is included in the TX's footprint but is not specified via
+the `archivedEntries` vector, the tx will fail with the
+`INVOKE_HOST_FUNCTION_ENTRY_ARCHIVED` code. If there are insufficient resources
+for entry restoration, the tx will fail with the
+`INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED` code.
##### Meta
-Whenever an entry is restored, the data/code entry will be emitted as a `LedgerEntryChangeType` of type `LEDGER_ENTRY_RESTORE`.
-This will include the complete LedgerEntry of the restored entry. It does not matter if the entry has been
-evicted or not, the meta produced will be the same. Additionally, the corresponding `TTL` entry will also be emitted
-as a `LedgerEntryChangeType` of type `LEDGER_ENTRY_RESTORE`.
-
-If a restored entry is modified during host function invocation, another `LedgerEntryChangeType` will be
-emitted corresponding to the entry change in addition to the `LEDGER_ENTRY_RESTORE` restore event.
-This is similar to the current meta schema, where the starting value of a LedgerEntry is emitted as
-`LEDGER_ENTRY_STATE` followed by the change. This is similar, except that `LEDGER_ENTRY_STATE` is replaced
-by `LEDGER_ENTRY_RESTORE`. This applies to both data/code and `TTL` entries. For example, suppose a data/code entry
-is restored via an `InvokeHostFunctionOp`, then during invocation its `TTL` value is bumped. Meta for the TX will be
-as follows:
+Whenever an entry is restored, the data/code entry will be emitted as a
+`LedgerEntryChangeType` of type `LEDGER_ENTRY_RESTORE`. This will include the
+complete LedgerEntry of the restored entry. It does not matter if the entry has
+been evicted or not, the meta produced will be the same. Additionally, the
+corresponding `TTL` entry will also be emitted as a `LedgerEntryChangeType` of
+type `LEDGER_ENTRY_RESTORE`.
+
+If a restored entry is modified during host function invocation, another
+`LedgerEntryChangeType` will be emitted corresponding to the entry change in
+addition to the `LEDGER_ENTRY_RESTORE` restore event. This is similar to the
+current meta schema, where the starting value of a LedgerEntry is emitted as
+`LEDGER_ENTRY_STATE` followed by the change. This is similar, except that
+`LEDGER_ENTRY_STATE` is replaced by `LEDGER_ENTRY_RESTORE`. This applies to
+both data/code and `TTL` entries. For example, suppose a data/code entry is
+restored via an `InvokeHostFunctionOp`, then during invocation its `TTL` value
+is bumped. Meta for the TX will be as follows:
```
data/code: LEDGER_ENTRY_RESTORE(restoredValue)
TTL: LEDGER_ENTRY_RESTORE(minimumTTLfromRestore), LEDGER_ENTRY_UPDATED(bumpedTTLValue)
```
-Similarly, suppose a `CONTRACT_DATA` entry is restored, then deleted during invocation. Meta for the TX will be:
+Similarly, suppose a `CONTRACT_DATA` entry is restored, then deleted during
+invocation. Meta for the TX will be:
```
CONTRACT_DATA: LEDGER_ENTRY_RESTORE(restoredValue), LEDGER_ENTRY_REMOVED(dataKey)
TTL: LEDGER_ENTRY_RESTORE(minimumTTLfromRestore), LEDGER_ENTRY_REMOVED(ttlKey)
```
-If an entry is restored and not modified (either from RestoreFootprintOp or InvokeHostFunctionOp), the meta will be:
+If an entry is restored and not modified (either from RestoreFootprintOp or
+InvokeHostFunctionOp), the meta will be:
```
CONTRACT_DATA: LEDGER_ENTRY_RESTORE(restoredValue)
@@ -478,13 +596,16 @@ TTL: LEDGER_ENTRY_RESTORE(minimumTTLfromRestore)
#### getledgerentry Endpoint
-In order to facilitate state queries for RPC preflight, captive-core will expose the following HTTP endpoint:
+In order to facilitate state queries for RPC preflight, captive-core will
+expose the following HTTP endpoint:
`getledgerentry`
-Used to query both live and archived LedgerEntries. While `getledgerentryraw` does simple key-value lookup
-on the live ledger, `getledgerentry` will query a given key in both the live BucketList and Hot Archive BucketList.
-It will also report whether an entry is archived or live, and return the entry's current TTL value.
+Used to query both live and archived LedgerEntries. While `getledgerentryraw`
+does simple key-value lookup on the live ledger, `getledgerentry` will query a
+given key in both the live BucketList and Hot Archive BucketList. It will also
+report whether an entry is archived or live, and return the entry's current TTL
+value.
A POST request with the following body:
@@ -492,11 +613,11 @@ A POST request with the following body:
ledgerSeq=NUM&key=Base64&key=Base64...
```
-- `ledgerSeq`: An optional parameter, specifying the ledger snapshot to base the query on.
- If the specified ledger in not available, a 404 error will be returned. If this parameter
- is not set, the current ledger is used.
-- `key`: A series of Base64 encoded XDR strings specifying the `LedgerKey` to query. TTL keys
- must not be queried and will return 400 if done so.
+- `ledgerSeq`: An optional parameter, specifying the ledger snapshot to base
+  the query on. If the specified ledger is not available, a 404 error will be
+ returned. If this parameter is not set, the current ledger is used.
+- `key`: A series of Base64 encoded XDR strings specifying the `LedgerKey` to
+ query. TTL keys must not be queried and will return 400 if done so.
A JSON payload is returned as follows:
@@ -511,123 +632,154 @@ A JSON payload is returned as follows:
}
```
-- `entries`: A list of entries for each queried LedgerKey. Every `key` queried is guaranteed to
-have a corresponding entry returned.
-- `e`: Either the `LedgerEntry` or `LedgerKey` for a given key encoded as a Base64 string. If a key
-is live or archived, `e` contains the corresponding `LedgerEntry`. If a key does not exist
-(including expired temporary entries) `e` contains the corresponding `LedgerKey`.
+- `entries`: A list of entries for each queried LedgerKey. Every `key` queried
+ is guaranteed to have a corresponding entry returned.
+- `e`: Either the `LedgerEntry` or `LedgerKey` for a given key encoded as a
+ Base64 string. If a key is live or archived, `e` contains the corresponding
+ `LedgerEntry`. If a key does not exist (including expired temporary entries)
+ `e` contains the corresponding `LedgerKey`.
- `state`: One of the following values:
- `live`: Entry is live.
- - `new`: Entry does not exist. Either the entry has never existed or is an expired temp entry.
+ - `new`: Entry does not exist. Either the entry has never existed or is an
+ expired temp entry.
- `archived`: Entry is archived, counts towards on-disk resources.
-- `ttl`: An optional value, only returned for live Soroban entries. Contains
-a uint32 value for the entry's `liveUntilLedgerSeq`.
+- `ttl`: An optional value, only returned for live Soroban entries. Contains a
+ uint32 value for the entry's `liveUntilLedgerSeq`.
- `ledgerSeq`: The ledger number on which the query was performed.
-Classic entries will always return a state of `live` or `new`.
-If a classic entry does not exist, it will have a state of `new`.
+Classic entries will always return a state of `live` or `new`. If a classic
+entry does not exist, it will have a state of `new`.
Similarly, temporary Soroban entries will always return a state of `live` or
-`new`. If a temporary entry does not exist or has expired, it
-will have a state of `new`.
+`new`. If a temporary entry does not exist or has expired, it will have a state
+of `new`.
This endpoint will always give correct information for archived entries. Even
-if an entry has been archived and evicted to the Hot Archive, this endpoint will
-still the archived entry's full `LedgerEntry` as well as the proper state.
+if an entry has been archived and evicted to the Hot Archive, this endpoint
+will still return the archived entry's full `LedgerEntry` and proper state.
## Design Rationale
### Reasoning behind in-memory limits and no read fees
-While there is a resource limit for the maximum number of in-memory entry reads, there is no limit on the
-number of in-memory read bytes. Additionally, there are no read-specific fees for in-memory state.
-
-There does not seem to be a need for limits on in-memory bytes read. The cost associated with loading an
-in-memory entry is searching an in-memory map and making a copy of the LedgerEntry. These copies will be
-metered against the `txMemoryLimit` limit. To prevent OOM based DOS, operation apply will exit early if
-loading entries exceeds this limit.
-
-There is very little execution time associated with copying an in-memory entry. Given that the size of the
-copies is bounded by the `txMemoryLimit` and no expensive disk IO is required, it is not necessary to charge
-a fee for in-memory reads. An attacker could potentially exploit this
-by creating TXs with large readOnly footprints that don't actually interact with the loaded state. The
-TX would consume little resources and would be cheap, but would load the maximum amount of in-memory state
-and. Due to the low computational cost of these in-memory copies, it seems unlikely this would negatively
-affect the network. Even the TX size costs associated with the large footprint may make the attack economically
-nonviable relative to the amount of stress on the network. Additionally, memory allocations are short lived and
-freed immediately following TX application. Only a small number of TXs are executed at any given time such that
-memory is quickly freed such that OOM based attacks are not possible.
+While there is a resource limit for the maximum number of in-memory entry
+reads, there is no limit on the number of in-memory read bytes. Additionally,
+there are no read-specific fees for in-memory state.
+
+There does not seem to be a need for limits on in-memory bytes read. The cost
+associated with loading an in-memory entry is searching an in-memory map and
+making a copy of the LedgerEntry. These copies will be metered against the
+`txMemoryLimit` limit. To prevent OOM based DOS, operation apply will exit
+early if loading entries exceeds this limit.
+
+There is very little execution time associated with copying an in-memory entry.
+Given that the size of the copies is bounded by the `txMemoryLimit` and no
+expensive disk IO is required, it is not necessary to charge a fee for
+in-memory reads. An attacker could potentially exploit this by creating TXs
+with large readOnly footprints that don't actually interact with the loaded
+state. The TX would consume little resources and would be cheap, but would load
+the maximum amount of in-memory state. Due to the low computational cost of
+these in-memory copies, it seems unlikely this would negatively affect the
+network. Even the TX size costs associated with the large footprint may make
+the attack economically nonviable relative to the amount of stress on the
+network. Additionally, memory allocations are short lived and freed immediately
+following TX application. Only a small number of TXs are executed at any
+given time, so memory is quickly freed and OOM based attacks are not
+possible.
### Changes required for `CONTRACT_CODE`
-This CAP combined with [CAP-0065](cap-0065.md) cache all live Soroban state in memory,
-including `CONTRACT_DATA`, `TTL`, and instantiated contract code. These caches open potential DOS vectors with the
-way `CONTRACT_CODE` size is metered currently.
-
-For `CONTRACT_DATA` and `TTL`, we cache all live `LedgerEntries`. This does not present a DOS angle. We meter these
-entry types based on serialized `LedgerEntry` XDR size such that we have a 1 to 1 ratio between the bytes that are metered and
-the bytes that are cached in memory.
-
-For `CONTRACT_CODE`, this is no longer the case. [CAP-0065](cap-0065.md) caches instantiated modules in memory,
-which in the worst case could be up to 40x the size of the `CONTRACT_CODE` LedgerEntry size. This is a significant
-OOM risk, as `sorobanLiveStateTargetSizeBytes` must bound the amount of state validators are required to cache in
-memory. If `CONTRACT_CODE` XDR size is used, an attacker could upload code that once instantiated could bloat the
-cache size to `sorobanLiveStateTargetSizeBytes * 40`, causing an OOM based DOS of the network. To prevent this,
-we use the instantiated module memory cost model size instead of the `CONTRACT_CODE` LedgerEntry size for rent fees.
-While fees become slightly higher, this is reasonable given that the contract modules consume more network resources
-(memory).
+This CAP combined with [CAP-0065](cap-0065.md) caches all live Soroban state in
+memory, including `CONTRACT_DATA`, `TTL`, and instantiated contract code. These
+caches open potential DOS vectors with the way `CONTRACT_CODE` size is metered
+currently.
+
+For `CONTRACT_DATA` and `TTL`, we cache all live `LedgerEntries`. This does not
+present a DOS angle. We meter these entry types based on serialized
+`LedgerEntry` XDR size such that we have a 1 to 1 ratio between the bytes that
+are metered and the bytes that are cached in memory.
+
+For `CONTRACT_CODE`, this is no longer the case. [CAP-0065](cap-0065.md) caches
+instantiated modules in memory, which in the worst case could be up to 40x the
+size of the `CONTRACT_CODE` LedgerEntry size. This is a significant OOM risk,
+as `sorobanLiveStateTargetSizeBytes` must bound the amount of state validators
+are required to cache in memory. If `CONTRACT_CODE` XDR size is used, an
+attacker could upload code that once instantiated could bloat the cache size to
+`sorobanLiveStateTargetSizeBytes * 40`, causing an OOM based DOS of the
+network. To prevent this, we use the instantiated module memory cost model size
+instead of the `CONTRACT_CODE` LedgerEntry size for rent fees. While fees
+become slightly higher, this is reasonable given that the contract modules
+consume more network resources (memory).
### Requirement for Module Cache and Cost Model Consistency
-Using the instantiated module size for rent fees requires that the Module Cache be up to date with the current
-cost model. This means that each upgrade to the cost model must trigger a rebuild of the Module Cache.
+Using the instantiated module size for rent fees requires that the Module Cache
+be up to date with the current cost model. This means that each upgrade to the
+cost model must trigger a rebuild of the Module Cache.
### UX Expectations
-With the addition of the new read resource type, contract developers will need to reason about what types of data
-they are accessing wrt limits. However, the system has been designed such that this should be straight forward.
-From a correctness standpoint, all smart contracts can be guaranteed to only interact with live state. A contract
-developer can assume that all Soroban state consumes only in-memory resources, since the `RestoreFootprintOp`
-(or equivalent in the case of deprecation) can restore all required Soroban entries before host function invocation
-in the worst case. This means that from a limits perspective, only Soroban state vs. classic state must be considered
-for contract correctness. However, there may be issues with usability or efficiency, discussed in the section below.
-
-While contract developers only need to distinguish between Soroban and classic state for contract correctness, the
-tx itself will still need to properly populate limits and the `archivedEntries` vector based on whether or not
-entries are archived. Similar to today, these resources and footprints will be generated automatically via RPC.
-Specifically, RPC will correctly generate the footprint, `archivedEntries` vector, in-memory read resources,
-and disk read resources. Thus most of the complexity introduced by these changes should be abstracted away.
-
-Note that the RPC itself is not required to implement this behavior, but will call captive-core endpoints.
+With the addition of the new read resource type, contract developers will need
+to reason about what types of data they are accessing wrt limits. However, the
+system has been designed such that this should be straightforward. From a
+correctness standpoint, all smart contracts can be guaranteed to only interact
+with live state. A contract developer can assume that all Soroban state
+consumes only in-memory resources, since the `RestoreFootprintOp` (or
+equivalent in the case of deprecation) can restore all required Soroban entries
+before host function invocation in the worst case. This means that from a
+limits perspective, only Soroban state vs. classic state must be considered for
+contract correctness. However, there may be issues with usability or
+efficiency, discussed in the section below.
+
+While contract developers only need to distinguish between Soroban and classic
+state for contract correctness, the tx itself will still need to properly
+populate limits and the `archivedEntries` vector based on whether or not
+entries are archived. Similar to today, these resources and footprints will be
+generated automatically via RPC. Specifically, RPC will correctly generate the
+footprint, `archivedEntries` vector, in-memory read resources, and disk read
+resources. Thus most of the complexity introduced by these changes should be
+abstracted away.
+
+Note that the RPC itself is not required to implement this behavior, but will
+call captive-core endpoints.
### Expectations for Downstream Systems
RPC must make the following changes:
-1. Ingest the new `LEDGER_ENTRY_RESTORE` restore changes. This includes interpreting the new `LedgerEntryChangeType`
-correctly, but also correctly handling the case where an entry is restored and modified in the same ledger.
-2. Allow user queries for both live and evicted state via the `getledgerentry` endpoint.
-3. Handle transactions that cannot use automatic restore due to resource limits.
+1. Ingest the new `LEDGER_ENTRY_RESTORE` restore changes. This includes
+ interpreting the new `LedgerEntryChangeType` correctly, but also correctly
+ handling the case where an entry is restored and modified in the same
+ ledger.
+2. Allow user queries for both live and evicted state via the `getledgerentry`
+ endpoint.
+3. Handle transactions that cannot use automatic restore due to resource
+ limits.
-Change 1 should be straight forward and is defined in the [InvokeHostFunctionOp Meta](#meta) section.
+Change 1 should be straightforward and is defined in the
+[InvokeHostFunctionOp Meta](#meta) section.
-Change 2 allows wallets and dapps to effectively reason about ledger state. RPC integration is minimal, as internally
-this endpoint can be implemented by calling the captive-core [`getledgerentry` HTTP endpoint](#getledgerentry-endpoint).
+Change 2 allows wallets and dapps to effectively reason about ledger state. RPC
+integration is minimal, as internally this endpoint can be implemented by
+calling the captive-core
+[`getledgerentry` HTTP endpoint](#getledgerentry-endpoint).
Change 3 is more involved and discussed in the section below.
### Handling Invocations that restore too much state for automatic restore
-Suppose an expensive contract is written, such that it uses the maximum in-memory resources and the maximum
-disk resources when no entries are archived. Should any Soroban entry be archived automatic
-restoration would not be possible, as the restoration would exceed resource limits. In this case, it would be
-necessary to manually issue one or more`RestoreFootprintOp` prior to the
-host function invocation.
+Suppose an expensive contract is written, such that it uses the maximum
+in-memory resources and the maximum disk resources when no entries are
+archived. Should any Soroban entry be archived, automatic restoration would
+not be possible, as the restoration would exceed resource limits. In this
+case, it would be necessary to manually issue one or more `RestoreFootprintOp`
+prior to the host function invocation.
-To facilitate this, RPC must change the structure of `restorePreamble` to allow for multiple `RestoreFootprintOp`s.
-Instead of returning a single set of keys and resource object in `restorePreamble`, it should return a vector of
-{keySet, resourceObject} pairs, where each pair represents a `RestoreFootprintOp`. This vector should be constructed
-as follows:
+To facilitate this, RPC must change the structure of `restorePreamble` to allow
+for multiple `RestoreFootprintOp`s. Instead of returning a single set of keys
+and resource object in `restorePreamble`, it should return a vector of {keySet,
+resourceObject} pairs, where each pair represents a `RestoreFootprintOp`. This
+vector should be constructed as follows:
```python
restorePreamble = []
@@ -656,46 +808,59 @@ if not archivedKeys.empty():
restorePreamble.append(RestoreOp())
```
-Ideally, developers should be avoid creating contracts that require these expensive restores if possible.
-The challenge will be communicating this to developers, but may be accomplished via documentation.
+Ideally, developers should avoid creating contracts that require these
+expensive restores if possible. The challenge will be communicating this to
+developers, but may be accomplished via documentation.
### Concerns regarding Automatic Restoration with Full State Archival
-In protocol 20, there was no technical reason against enabling automatic restoration. However, the proof system
-for full state archival was not finalized, and the explicit `RestoreFootprintOp` provided more future flexibility.
-Additionally, it was important for developers to build preflighting transactions into their dApp flows.
-
-If Soroban had launched with automatic restoration, it is possible many developers would attempt to avoid preflight
-by creating transaction footprint and resource declarations manually with reasonable fee and resource assumptions.
-In a system where explicit entry restoration is not required, the burden of resource discoverability is low, so
-developers are not incentivized or required to build preflight into their tx submission system. Long term this is
-a significant issue, as the addition of full state archival with proofs makes manual resource and footprint
-declaration largely impossible. Any system that did not use RPC preflight would be broken by full state archival.
-
-With this proposal, it is vital to ensure that automatic restore does not result in a situation where large numbers
-of developers circumvent preflight. Two specific features of this proposal help ensure developers are incentivized
-to build preflight into their flows now such that they are not broken later by archival proofs. Specifically,
-no in-memory read fees incentivize developers to minimize fees by using preflight to detect the maximum
-number of entries currently live. Additionally, the requirement of `archivedEntries` vectors makes manually
-constructing footprints challenging, as the developer would need to be able to distinguish live vs evicted state
-to minimize fees. Given that preflighting will result in lower on-chain fees, and given that resource footprints
-are somewhat challenging to create without captive-core seems to indicate that developers will preflight transactions
-prior to submission as intended.
+In protocol 20, there was no technical reason against enabling automatic
+restoration. However, the proof system for full state archival was not
+finalized, and the explicit `RestoreFootprintOp` provided more future
+flexibility. Additionally, it was important for developers to build
+preflighting transactions into their dApp flows.
+
+If Soroban had launched with automatic restoration, it is possible many
+developers would attempt to avoid preflight by creating transaction footprint
+and resource declarations manually with reasonable fee and resource
+assumptions. In a system where explicit entry restoration is not required, the
+burden of resource discoverability is low, so developers are not incentivized
+or required to build preflight into their tx submission system. Long term this
+is a significant issue, as the addition of full state archival with proofs
+makes manual resource and footprint declaration largely impossible. Any system
+that did not use RPC preflight would be broken by full state archival.
+
+With this proposal, it is vital to ensure that automatic restore does not
+result in a situation where large numbers of developers circumvent preflight.
+Two specific features of this proposal help ensure developers are incentivized
+to build preflight into their flows now such that they are not broken later by
+archival proofs. Specifically, no in-memory read fees incentivize developers to
+minimize fees by using preflight to detect the maximum number of entries
+currently live. Additionally, the requirement of `archivedEntries` vectors
+makes manually constructing footprints challenging, as the developer would need
+to be able to distinguish live vs evicted state to minimize fees. Given that
+preflighting will result in lower on-chain fees, and given that resource
+footprints are somewhat challenging to create without captive-core, it seems
+likely that developers will preflight transactions prior to submission as
+intended.
## Security Concerns
### Memory Based DOS Attack
-By distinguishing between disk backed and memory backed reads at the protocol level, it is now required that
-validators maintain all live Soroban state in memory. A significant increase in live Soroban state could
-lead to an OOM based attack on the network.
-
-However, this is not a significant concern. The `sorobanLiveStateTargetSizeBytes` (see [CAP-0062](cap-0062.md))
-provides a soft cap on the amount of live Soroban state that can exist at any given time. This prevents any
-sort of runaway memory issue. Additionally, because of the eviction process, there is a natural back pressure
-applied to in-memory state, the rate at which can be controlled via network config settings. Finally, the rate
-at which new state is created is also controlled via network configs, so there does not appear to be any
-memory based DOS attack possible.
+By distinguishing between disk backed and memory backed reads at the protocol
+level, it is now required that validators maintain all live Soroban state in
+memory. A significant increase in live Soroban state could lead to an OOM based
+attack on the network.
+
+However, this is not a significant concern. The
+`sorobanLiveStateTargetSizeBytes` (see [CAP-0062](cap-0062.md)) provides a soft
+cap on the amount of live Soroban state that can exist at any given time. This
+prevents any sort of runaway memory issue. Additionally, because of the
+eviction process, there is a natural back pressure applied to in-memory state,
+the rate of which can be controlled via network config settings. Finally, the
+rate at which new state is created is also controlled via network configs, so
+there does not appear to be any memory based DOS attack possible.
## Test Cases
diff --git a/core/cap-0067.md b/core/cap-0067.md
index 01fb34b22..c19677c33 100644
--- a/core/cap-0067.md
+++ b/core/cap-0067.md
@@ -15,33 +15,52 @@ Protocol version: 23
## Simple Summary
-Emit `transfer`, `mint`, `burn`, `clawback`, `fee`, and `set_authorized` events in Classic in the same format as what we see in Soroban so that the movement of assets and trustline updates can be tracked using a single stream of data. In addition to emitting events in Classic, update the events emitted in the Stellar Asset Contract to be semantically correct and compatible with SEP-41.
+Emit `transfer`, `mint`, `burn`, `clawback`, `fee`, and `set_authorized` events
+in Classic in the same format as what we see in Soroban so that the movement of
+assets and trustline updates can be tracked using a single stream of data. In
+addition to emitting events in Classic, update the events emitted in the
+Stellar Asset Contract to be semantically correct and compatible with SEP-41.
## Motivation
-Tracking the movement of Stellar assets today is complex because you need to consume both Soroban events emitted by the Stellar Asset Contract and ledger entry changes for Classic operations. There are also differences between Stellar assets and custom Soroban tokens that this CAP will address so those differences will be made irrelevant to the end user.
+Tracking the movement of Stellar assets today is complex because you need to
+consume both Soroban events emitted by the Stellar Asset Contract and ledger
+entry changes for Classic operations. There are also differences between
+Stellar assets and custom Soroban tokens that this CAP will address so those
+differences will be made irrelevant to the end user.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should be secure and reliable, and should bias towards safety, simplicity,
- reliability, and performance over new functionality.
+- The Stellar Network should be secure and reliable, and should bias towards
+ safety, simplicity, reliability, and performance over new functionality.
## Abstract
-The changes specified by this CAP -
-* Remove the admin from the topics of the `mint` and `clawback` events emitted in the SAC.
-* Update issuer semantics in the SAC so that a `transfer` involving the issuer will emit the semantically correct event (`mint` or `burn`).
-* Add memo support to Soroban by adding a `SC_ADDRESS_TYPE_MUXED_ACCOUNT` and allow the SAC to take in this type in the `transfer` function call.
-* Update the `data` field in the `transfer` event from an integer to a map to store additional memo information.
-* Emit an event for every movement of an `Asset` of types `ASSET_TYPE_NATIVE`, `ASSET_TYPE_CREDIT_ALPHANUM4`, and `ASSET_TYPE_CREDIT_ALPHANUM12`. in Stellar classic. All of the added events will follow the format of the existing Stellar Asset Contract events, with the exception of a new `fee` event to track fees paid by the source account.
+The changes specified by this CAP -
+
+- Remove the admin from the topics of the `mint` and `clawback` events emitted
+ in the SAC.
+- Update issuer semantics in the SAC so that a `transfer` involving the issuer
+ will emit the semantically correct event (`mint` or `burn`).
+- Add memo support to Soroban by adding a `SC_ADDRESS_TYPE_MUXED_ACCOUNT` and
+ allow the SAC to take in this type in the `transfer` function call.
+- Update the `data` field in the `transfer` event from an integer to a map to
+ store additional memo information.
+- Emit an event for every movement of an `Asset` of types `ASSET_TYPE_NATIVE`,
+  `ASSET_TYPE_CREDIT_ALPHANUM4`, and `ASSET_TYPE_CREDIT_ALPHANUM12` in Stellar
+ classic. All of the added events will follow the format of the existing
+ Stellar Asset Contract events, with the exception of a new `fee` event to
+ track fees paid by the source account.
## Specification
### XDR Changes
-This patch of XDR changes is based on the XDR files in commit `fa0338f4a25a95320d2143c8f08d200f0a360a4b` of stellar-xdr.
+This patch of XDR changes is based on the XDR files in commit
+`fa0338f4a25a95320d2143c8f08d200f0a360a4b` of stellar-xdr.
+
```diff mddiffcheck.ignore=true
diff --git a/Stellar-contract.x b/Stellar-contract.x
index 5113005..71738cb 100644
@@ -63,7 +82,7 @@ index 5113005..71738cb 100644
+ uint64 id;
+ uint256 ed25519;
};
-
+
union SCAddress switch (SCAddressType type)
@@ -188,6 +197,12 @@ case SC_ADDRESS_TYPE_ACCOUNT:
AccountID accountId;
@@ -76,7 +95,7 @@ index 5113005..71738cb 100644
+case SC_ADDRESS_TYPE_LIQUIDITY_POOL:
+ PoolID liquidityPoolId;
};
-
+
%struct SCVal;
diff --git a/Stellar-ledger.x b/Stellar-ledger.x
index 0fc03e2..963acc4 100644
@@ -85,7 +104,7 @@ index 0fc03e2..963acc4 100644
@@ -434,6 +434,41 @@ struct TransactionMetaV3
// Soroban transactions).
};
-
+
+struct OperationMetaV2
+{
+ ExtensionPoint ext;
@@ -103,26 +122,26 @@ index 0fc03e2..963acc4 100644
+};
+
+// Transaction-level events happen at different stages of the ledger apply flow
-+// (as opposed to the operation events that all happen atomically when
++// (as opposed to the operation events that all happen atomically when
+// transaction is applied).
+// This enum represents the possible stages during which an event has been
+// emitted.
+enum TransactionEventStage {
-+ // The event has happened before any one of the transactions has its
++ // The event has happened before any one of the transactions has its
+ // operations applied.
+ TRANSACTION_EVENT_STAGE_BEFORE_ALL_TXS = 0,
+ // The event has happened immediately after operations of the transaction
+ // have been applied.
+ TRANSACTION_EVENT_STAGE_AFTER_TX = 1,
-+ // The event has happened after every transaction had its operations
++ // The event has happened after every transaction had its operations
+ // applied.
+ TRANSACTION_EVENT_STAGE_AFTER_ALL_TXS = 2
+}
+
+// Represents a transaction-level event in metadata.
-+// Currently this is limited to the fee events (when fee is charged or
++// Currently this is limited to the fee events (when fee is charged or
+// refunded).
-+struct TransactionEvent {
++struct TransactionEvent {
+ TransactionEventStage stage; // Stage at which an event has occurred.
+ ContractEvent event; // The contract event that has occurred.
+}
@@ -144,7 +163,7 @@ index 0fc03e2..963acc4 100644
+};
+
+
- // This is in Stellar-ledger.x to due to a circular dependency
+ // This is in Stellar-ledger.x to due to a circular dependency
struct InvokeHostFunctionSuccessPreImage
{
@@ -453,6 +541,8 @@ case 2:
@@ -154,13 +173,14 @@ index 0fc03e2..963acc4 100644
+case 4:
+ TransactionMetaV4 v4;
};
-
+
// This struct groups together changes on a per transaction basis
```
### New host functions
-The diff is based on commit `9a24835ec6d75c526fb128a1d73b92e7d7becfa7` of `rs-soroban-env`.
+The diff is based on commit `9a24835ec6d75c526fb128a1d73b92e7d7becfa7` of
+`rs-soroban-env`.
```diff mddiffcheck.ignore=true
diff --git a/soroban-env-common/env.json b/soroban-env-common/env.json
@@ -207,380 +227,662 @@ index d421dca2..4249efc6 100644
### Remove the admin from the SAC `mint`, `clawback`, and `set_authorized` events
The `mint` event will look like:
+
```
contract: asset, topics: ["mint", to:Address, sep0011_asset:String], data: amount:i128
```
The `clawback` event will look like:
+
```
contract: asset, topics: ["clawback", from:Address, sep0011_asset:String], data: amount:i128
```
The `set_authorized` event will look like:
+
```
contract: asset, topics: ["set_authorized", id:Address, sep0011_asset:String], data: authorize:bool
```
-### Emit the semantically correct event for a Stellar Asset Contract `transfer` when the issuer is involved
+### Emit the semantically correct event for a Stellar Asset Contract `transfer` when the issuer is involved
-At the moment, if the issuer is the sender in a Stellar Asset Contract `transfer`, the asset will be minted. If the issuer is the recipient, the asset will be burned. The event emitted in both scenarios, however, is the `transfer` event. This CAP changes that behavior to instead emit the `mint`/`burn` event.
+At the moment, if the issuer is the sender in a Stellar Asset Contract
+`transfer`, the asset will be minted. If the issuer is the recipient, the asset
+will be burned. The event emitted in both scenarios, however, is the `transfer`
+event. This CAP changes that behavior to instead emit the `mint`/`burn` event.
### Multiplexing support
-Multiplexing (muxing) is a technique used by the Stellar users to represent custodial accounts, i.e. accounts that are represented by a single on-chain entity, but may have an arbitrary number of off-chain, 'virtual' balances. Stellar protocol currently provides two ways for supporting the account 'multiplexing' ('muxing'): using the transaction memo to identify the payment destination, and the `MuxedAccount` type that can identify the source or destination of most of the Stellar operations.
+Multiplexing (muxing) is a technique used by the Stellar users to represent
+custodial accounts, i.e. accounts that are represented by a single on-chain
+entity, but may have an arbitrary number of off-chain, 'virtual' balances.
+Stellar protocol currently provides two ways for supporting the account
+'multiplexing' ('muxing'): using the transaction memo to identify the payment
+destination, and the `MuxedAccount` type that can identify the source or
+destination of most of the Stellar operations.
-This CAP ensures that muxing information is represented in the events, and also extends the multiplexing functionality to Soroban operations.
+This CAP ensures that muxing information is represented in the events, and also
+extends the multiplexing functionality to Soroban operations.
#### `MuxedAddressObject` host object
-A new host object type called `MuxedAddressObject` is added to represent multiplexed addresses (only account addresses as of this CAP). While the regular host objects used to have 1:1 mapping to `SCVal`, due to changes in this CAP it's necessary to change the semantics and have 1:n mapping between `SCVal::SCV_ADDRESS` (and the respective `ScAddress`) and the host objects. Specifically, semantics of the mapping between `ScAddress` and host objects are updated in the following fashion:
+A new host object type called `MuxedAddressObject` is added to represent
+multiplexed addresses (only account addresses as of this CAP). While the
+regular host objects used to have 1:1 mapping to `SCVal`, due to changes in
+this CAP it's necessary to change the semantics and have 1:n mapping between
+`SCVal::SCV_ADDRESS` (and the respective `ScAddress`) and the host objects.
+Specifically, semantics of the mapping between `ScAddress` and host objects are
+updated in the following fashion:
-- (unchanged) `SC_ADDRESS_TYPE_ACCOUNT` and `SC_ADDRESS_TYPE_CONTRACT` still correspond to `AddressObject`
+- (unchanged) `SC_ADDRESS_TYPE_ACCOUNT` and `SC_ADDRESS_TYPE_CONTRACT` still
+ correspond to `AddressObject`
- `SC_ADDRESS_TYPE_MUXED_ACCOUNT` corresponds to `MuxedAddressObject`
-- `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE` and `SC_ADDRESS_TYPE_LIQUIDITY_POOL` are disallowed by host and any conversion from XDR to host objects that involves these address kinds will fail (including passing these in the authorization payloads, function arguments, decoding the XDR blobs via host function etc.).
-
-Other than the special XDR mapping, `MuxedAddressObject` is a regular host object and thus it's not implicitly compatible with `AddressObject` type. Thus the contracts that expect the regular `AddressObject` as an input argument will fail if `ScAddress::SC_ADDRESS_TYPE_MUXED_ACCOUNT` is passed to them (the failure is expected and is the same kind of failure as for passing any other object kind beyond `AddressObject`).
-
-`MuxedAddressObject` is meant to identify the multiplexed addresses for the special cases that require them. Thus we only provide the host functions that get the two parts of `MuxedAddressObject`:
-
-- `get_address_from_muxed_address` returns the regular address part as `AddressObject`
+- `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE` and `SC_ADDRESS_TYPE_LIQUIDITY_POOL` are
+ disallowed by host and any conversion from XDR to host objects that involves
+ these address kinds will fail (including passing these in the authorization
+ payloads, function arguments, decoding the XDR blobs via host function etc.).
+
+Other than the special XDR mapping, `MuxedAddressObject` is a regular host
+object and thus it's not implicitly compatible with `AddressObject` type. Thus
+the contracts that expect the regular `AddressObject` as an input argument will
+fail if `ScAddress::SC_ADDRESS_TYPE_MUXED_ACCOUNT` is passed to them (the
+failure is expected and is the same kind of failure as for passing any other
+object kind beyond `AddressObject`).
+
+`MuxedAddressObject` is meant to identify the multiplexed addresses for the
+special cases that require them. Thus we only provide the host functions that
+get the two parts of `MuxedAddressObject`:
+
+- `get_address_from_muxed_address` returns the regular address part as
+ `AddressObject`
- `get_id_from_muxed_address` returns the multiplexing identifier as `U64Val`
##### `SC_ADDRESS_TYPE_MUXED_ACCOUNT` is prohibited in storage keys
-Multiplexed accounts are 'virtual' accounts that are meant to only exist off-chain. While with this CAP it will be possible for any contract to process `MuxedAddressObject` in an arbitrary fashion, most of the time it would be a mistake to use the whole muxed address in a contract data storage key (as it's purpose by definition is to support off-chain multiplexing). Thus as an additional precaution Soroban host will produce an error in case if `SC_ADDRESS_TYPE_MUXED_ACCOUNT` is present in the contract data key `SCVal`(either directly, or anywhere in the nested containers). This restriction is applied for all the kinds of the contract data storage, including the instance storage.
-
-In case if the users do want to use `MuxedAddressObject` for multiplexing on-chain, they can still use the provided host functions to decompose the address into two values and store them in a user-defined data structure.
+Multiplexed accounts are 'virtual' accounts that are meant to only exist
+off-chain. While with this CAP it will be possible for any contract to process
+`MuxedAddressObject` in an arbitrary fashion, most of the time it would be a
+mistake to use the whole muxed address in a contract data storage key (as its
+purpose by definition is to support off-chain multiplexing). Thus as an
+additional precaution Soroban host will produce an error if
+`SC_ADDRESS_TYPE_MUXED_ACCOUNT` is present in the contract data key
+`SCVal` (either directly, or anywhere in the nested containers). This
+restriction is applied for all the kinds of the contract data storage,
+including the instance storage.
+
+If the users do want to use `MuxedAddressObject` for multiplexing
+on-chain, they can still use the provided host functions to decompose the
+address into two values and store them in a user-defined data structure.
#### Update the SAC `transfer` function to support muxed addresses
-The `transfer` function of the Stellar Asset contract will be updated to accept `AddressObject` or `MuxedAddressObject` both for the `to` argument of the `transfer` function. If a `MuxedAddressObject` is passed, then the `transfer` function will behave as if a respective `AddressObject` has been passed. The only semantics difference compared to using a regular `AddressObject` is the event payload to follow the format described in the following section.
+The `transfer` function of the Stellar Asset contract will be updated to accept
+`AddressObject` or `MuxedAddressObject` both for the `to` argument of the
+`transfer` function. If a `MuxedAddressObject` is passed, then the `transfer`
+function will behave as if a respective `AddressObject` has been passed. The
+only semantics difference compared to using a regular `AddressObject` is the
+event payload to follow the format described in the following section.
#### Emit a map as the `data` field in the `transfer` and `mint` event if muxed information is being emitted for the destination.
-The `data` field is currently an integer that represents the `amount`. If no muxed information is being emitted, this will not change. If we're emitting muxed information, then the `data` field on the `transfer` and `mint` event will be an `SCVal` of type `SCV_MAP`. In
-that case, the key for each map entry will be an `SCSymbol` with the name of the field in `data` (eg. "amount"), and the value will be represented by the types specified by the events below.
-
-The general `transfer` event format that involves muxed accounts is `topics: ["transfer", from:, to:] data: { amount: i128, to_muxed_id:u64|bytes|string }`, where `to_muxed_id` is present. The `mint` event format that involves muxed accounts is `topics: ["mint", to:] data: { amount: i128, to_muxed_id:u64|bytes|string }`. The muxed account is always represented as a pair of (`non-muxed ScAddress`, `muxed_id`) , i.e the pair of (`to`, `to_muxed_id`) identifies the muxed transfer destination. Non-muxed accounts are represented as a singular `from`/`to` value in the event topics and the `to_muxed_id` key won't be present in the `data` map.
-
-The mapping between the input muxed addresses and the emitted events is generically defined as follows:
-
-1. `MuxedAccount` (or `ScAddress` that wraps `MuxedAccount`, i.e `SC_ADDRESS_TYPE_MUXED_ACCOUNT`) with `KEY_TYPE_MUXED_ED25519` variant set will be represented as a pair of `ScVal::SCV_ADDRESS` with `ScAddress` type `SC_ADDRESS_TYPE_ACCOUNT` and the same `ed25519` key as in the `MuxedAccount`, and `ScVal::SCV_U64` `muxed_id` with the same value as the `id` field of the `MuxedAccount`.
-2. If transaction memo is not `MEMO_NONE`, and the classic operation in the transaction has a destination that is a non-muxed classic account (i.e. is representable by `SC_ADDRESS_TYPE_ACCOUNT` address), then the transfer event destination will be represented as a pair of `ScVal::SCV_ADDRESS` with `to` `ScAddress` type `SC_ADDRESS_TYPE_ACCOUNT` and the same `ed25519` key as in the destination account, and an `to_muxed_id` `ScVal` containing the transaction memo. The memo is mapped to `ScVal` in the following fashion:
- - `MEMO_ID` is represented by `SCV_U64`
- - `MEMO_TEXT` is represented by `SCV_STRING`
- - `MEMO_HASH` and `MEMO_RETURN` are represented as `SCV_BYTES`, i.e. these memo types are indistinguishable in the event
-
-Note, that the memo mapping rules above imply that in case if the destination of the transfer or mint is not a classic account (i.e. a claimable balance, or liquidity pool), or a muxed account itself, then the transaction memo will not be represented in the unified events at all.
-
-Here is how the rules above generally apply to the generated unified events (the event-specific sections also contain more detailed information where necessary):
-
-- For `transfer` and `mint` events coming directly from Soroban (i.e. from Stellar Asset contract) we will represent the input `KEY_TYPE_MUXED_ED25519` muxed account addresses according to the rule 1
-- For the `transfer` and `mint` events emitted due to non-Soroban value movement we make best-effort mapping based on the input `MuxedAccount`s and the transaction memo:
- - If the destination is a `MuxedAccount` with `KEY_TYPE_MUXED_ED25519`, then `to_muxed_id` will be emitted according to rule 1
- - If the destination is a non-muxed classic account (i.e. `MuxedAccount` with `KEY_TYPE_ED25519` variant set), then emit `to_muxed_id` according to rule 2
+The `data` field is currently an integer that represents the `amount`. If no
+muxed information is being emitted, this will not change. If we're emitting
+muxed information, then the `data` field on the `transfer` and `mint` event
+will be an `SCVal` of type `SCV_MAP`. In that case, the key for each map entry
+will be an `SCSymbol` with the name of the field in `data` (eg. "amount"), and
+the value will be represented by the types specified by the events below.
+
+The general `transfer` event format that involves muxed accounts is
+`topics: ["transfer", from:, to:] data: { amount: i128, to_muxed_id:u64|bytes|string }`,
+where `to_muxed_id` is present. The `mint` event format that involves muxed
+accounts is
+`topics: ["mint", to:] data: { amount: i128, to_muxed_id:u64|bytes|string }`.
+The muxed account is always represented as a pair of (`non-muxed ScAddress`,
+`muxed_id`), i.e. the pair of (`to`, `to_muxed_id`) identifies the muxed
+transfer destination. Non-muxed accounts are represented as a singular
+`from`/`to` value in the event topics and the `to_muxed_id` key won't be
+present in the `data` map.
+
+The mapping between the input muxed addresses and the emitted events is
+generically defined as follows:
+
+1. `MuxedAccount` (or `ScAddress` that wraps `MuxedAccount`, i.e.
+ `SC_ADDRESS_TYPE_MUXED_ACCOUNT`) with `KEY_TYPE_MUXED_ED25519` variant set
+ will be represented as a pair of `ScVal::SCV_ADDRESS` with `ScAddress` type
+ `SC_ADDRESS_TYPE_ACCOUNT` and the same `ed25519` key as in the
+ `MuxedAccount`, and `ScVal::SCV_U64` `muxed_id` with the same value as the
+ `id` field of the `MuxedAccount`.
+2. If transaction memo is not `MEMO_NONE`, and the classic operation in the
+ transaction has a destination that is a non-muxed classic account (i.e. is
+ representable by `SC_ADDRESS_TYPE_ACCOUNT` address), then the transfer event
+ destination will be represented as a pair of `ScVal::SCV_ADDRESS` with `to`
+ `ScAddress` type `SC_ADDRESS_TYPE_ACCOUNT` and the same `ed25519` key as in
+   the destination account, and a `to_muxed_id` `ScVal` containing the
+ transaction memo. The memo is mapped to `ScVal` in the following fashion:
+
+- `MEMO_ID` is represented by `SCV_U64`
+- `MEMO_TEXT` is represented by `SCV_STRING`
+- `MEMO_HASH` and `MEMO_RETURN` are represented as `SCV_BYTES`, i.e. these memo
+ types are indistinguishable in the event
+
+Note that the memo mapping rules above imply that if the destination
+of the transfer or mint is not a classic account (i.e. a claimable balance, or
+liquidity pool), or a muxed account itself, then the transaction memo will not
+be represented in the unified events at all.
+
+Here is how the rules above generally apply to the generated unified events
+(the event-specific sections also contain more detailed information where
+necessary):
+
+- For `transfer` and `mint` events coming directly from Soroban (i.e. from
+ Stellar Asset contract) we will represent the input `KEY_TYPE_MUXED_ED25519`
+ muxed account addresses according to the rule 1
+- For the `transfer` and `mint` events emitted due to non-Soroban value
+ movement we make best-effort mapping based on the input `MuxedAccount`s and
+ the transaction memo:
+ - If the destination is a `MuxedAccount` with `KEY_TYPE_MUXED_ED25519`, then
+ `to_muxed_id` will be emitted according to rule 1
+ - If the destination is a non-muxed classic account (i.e. `MuxedAccount` with
+ `KEY_TYPE_ED25519` variant set), then emit `to_muxed_id` according to rule
+ 2
### New Events for Representing Fees
-When a transaction has a fee charged or refunded, emit an event in the following format in the transaction-level `events` field of the `TransactionMetaV4`:
+When a transaction has a fee charged or refunded, emit an event in the
+following format in the transaction-level `events` field of the
+`TransactionMetaV4`:
+
```
contract: native asset, topics: ["fee", from:Address], data: amount:i128
```
-Where `from` is the account paying the fee or receiving the fee refund, either the fee bump fee account for fee bump transactions, or the transaction source account otherwise. `amount` represents the fee charged (when positive) or refunded (when negative).
+Where `from` is the account paying the fee or receiving the fee refund, either
+the fee bump fee account for fee bump transactions, or the transaction source
+account otherwise. `amount` represents the fee charged (when positive) or
+refunded (when negative).
-Transaction-level events also have `TransactionEventStage` defined in order to identify when an event has occurred within the ledger application flow. The fee events are emitted with the following stages:
+Transaction-level events also have `TransactionEventStage` defined in order to
+identify when an event has occurred within the ledger application flow. The fee
+events are emitted with the following stages:
-- Initial fee charged is always emitted with `TRANSACTION_EVENT_STAGE_BEFORE_ALL_TXS` stage, i.e. fee is charged before any transactions applied
-- Fee refund events are only emitted when the refund is non-zero (as of protocol 23 that may only be true for Soroban transactions) and the stage depends on the protocol:
- - Before protocol 23 the stage is `TRANSACTION_EVENT_STAGE_AFTER_TX`, i.e. the fee is refunded immediately after the transaction has been applied
- - Starting from protocol 23 the stage is `TRANSACTION_EVENT_STAGE_AFTER_ALL_TXS`, i.e. the fee is refunded after every transaction has been applied
+- Initial fee charged is always emitted with
+ `TRANSACTION_EVENT_STAGE_BEFORE_ALL_TXS` stage, i.e. fee is charged before
+ any transactions applied
+- Fee refund events are only emitted when the refund is non-zero (as of
+ protocol 23 that may only be true for Soroban transactions) and the stage
+ depends on the protocol:
+ - Before protocol 23 the stage is `TRANSACTION_EVENT_STAGE_AFTER_TX`, i.e.
+ the fee is refunded immediately after the transaction has been applied
+ - Starting from protocol 23 the stage is
+ `TRANSACTION_EVENT_STAGE_AFTER_ALL_TXS`, i.e. the fee is refunded after
+ every transaction has been applied
-No more transaction level events may be emitted as of protocol 23. The future protocols may introduce more fee events that will follow the same pattern of identifying the stage when the event has occurred.
+No more transaction level events may be emitted as of protocol 23. The future
+protocols may introduce more fee events that will follow the same pattern of
+identifying the stage when the event has occurred.
-The final fee paid can be calculated by taking the sum of the amounts from all `fee` events.
+The final fee paid can be calculated by taking the sum of the amounts from all
+`fee` events.
### New Events for Operations
-This section will go over the semantics of how the additional `transfer`/`mint`/`burn`/`clawback`, `set_authorized` events are emitted for each operation. These events will be emitted through the `events<>` field in the new `OperationMetaV2`. Soroban events will be moved to `OperationMetaV2`. The hash of the current soroban events will still exist under `INVOKE_HOST_FUNCTION_SUCCESS` as it does today. It's also important to note that nothing is being removed from meta, and in fact, the emission of the events (as well as the new meta version) mentioned in this section will be configurable through a flag.
-Note that the `contract` field for these events corresponds to the Stellar Asset Contract address for the respective asset. The Stellar Asset Contract instance is not required to be deployed for the asset. The events will be published using the reserved contract address regardless of deployment status.
+This section will go over the semantics of how the additional
+`transfer`/`mint`/`burn`/`clawback`, `set_authorized` events are emitted for
+each operation. These events will be emitted through the `events<>` field in
+the new `OperationMetaV2`. Soroban events will be moved to `OperationMetaV2`.
+The hash of the current soroban events will still exist under
+`INVOKE_HOST_FUNCTION_SUCCESS` as it does today. It's also important to note
+that nothing is being removed from meta, and in fact, the emission of the
+events (as well as the new meta version) mentioned in this section will be
+configurable through a flag.
+
+Note that the `contract` field for these events corresponds to the Stellar
+Asset Contract address for the respective asset. The Stellar Asset Contract
+instance is not required to be deployed for the asset. The events will be
+published using the reserved contract address regardless of deployment status.
#### Payment
+
Emit one of the following events -
-For a payment not involving the issuer, or if both the sender and receiver are the issuer:
+For a payment not involving the issuer, or if both the sender and receiver are
+the issuer:
+
```
contract: asset, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
When sending from an issuer:
+
```
contract: asset, topics: ["mint", to:Address, sep0011_asset:String], data: amount:i128
```
When sending to an issuer:
+
```
contract: asset, topics: ["burn", from:Address, sep0011_asset:String], data: amount:i128
```
-As specified in the [section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination), `to_muxed_id` can be added to the `data` map.
-
+As specified in the
+[section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination),
+`to_muxed_id` can be added to the `data` map.
#### Path Payment Strict Send / Path Payment Strict Receive
-For each movement of the asset created by the path payment, emit one of the following -
+
+For each movement of the asset created by the path payment, emit one of the
+following -
```
contract: assetA, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
contract: assetB, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-If `from` is the issuer on a side of the trade, emit the following instead for that side of the trade:
+If `from` is the issuer on a side of the trade, emit the following instead for
+that side of the trade:
+
```
contract: asset, topics: ["mint", to:Address, sep0011_asset:String], data: amount:i128
```
-If `to` is the issuer on a side of the trade, emit the following instead for that side of the trade:
+If `to` is the issuer on a side of the trade, emit the following instead for
+that side of the trade:
+
```
contract: asset, topics: ["burn", from:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the account or the liquidity pool (represented by the new `SC_ADDRESS_TYPE_LIQUIDITY_POOL` address type) being debited (seller).
-* `to` is the account or liquidity pool (represented by the new `SC_ADDRESS_TYPE_LIQUIDITY_POOL` address type) being credited (buyer).
+- `from` is the account or the liquidity pool (represented by the new
+ `SC_ADDRESS_TYPE_LIQUIDITY_POOL` address type) being debited (seller).
+- `to` is the account or liquidity pool (represented by the new
+ `SC_ADDRESS_TYPE_LIQUIDITY_POOL` address type) being credited (buyer).
-The trades within a path payment are conceptually between the source account and the owner of the offers. Those are the addresses that'll appear on the event pairs specified above. At the end of all the trades, we need to emit one more `transfer` (or `burn` if the destination is the issuer) event to indicate a transfer from the source account to the destination account. The amount will be equivalent to the sum of the destination asset received on the trades of the final hop.
+The trades within a path payment are conceptually between the source account
+and the owner of the offers. Those are the addresses that'll appear on the
+event pairs specified above. At the end of all the trades, we need to emit one
+more `transfer` (or `burn` if the destination is the issuer) event to indicate
+a transfer from the source account to the destination account. The amount will
+be equivalent to the sum of the destination asset received on the trades of the
+final hop.
-Note that if the path payment has an empty path and `sendAsset == destAsset`, then the operation is effectively a regular [payment](#payment), so emit an event following the specifications of the payment section.
+Note that if the path payment has an empty path and `sendAsset == destAsset`,
+then the operation is effectively a regular [payment](#payment), so emit an
+event following the specifications of the payment section.
-As specified in the [section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination), `to_muxed_id` can be added to the `data` map, but only for the final transfer to the destination.
+As specified in the
+[section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination),
+`to_muxed_id` can be added to the `data` map, but only for the final transfer
+to the destination.
#### Create Account
+
Emit the following event:
+
```
contract: native asset, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the account being debited (creator).
-* `to` is the account being credited (created).
-* `amount` is the starting native balance.
+- `from` is the account being debited (creator).
+- `to` is the account being credited (created).
+- `amount` is the starting native balance.
-As specified in the [section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination), `to_muxed_id` can be added to the `data` map. The `destination` on a `CreateAccountOp` can't be muxed, so `to_muxed_id`, will only be added to the data map if the transaction memo is set.
+As specified in the
+[section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination),
+`to_muxed_id` can be added to the `data` map. The `destination` on a
+`CreateAccountOp` can't be muxed, so `to_muxed_id` will only be added to the
+data map if the transaction memo is set.
#### Merge Account
+
Emit the following event:
+
```
contract: native asset, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the account being debited (merged).
-* `to` is the account being credited (merged into).
-* `amount` is the merged native balance.
+- `from` is the account being debited (merged).
+- `to` is the account being credited (merged into).
+- `amount` is the merged native balance.
-As specified in the [section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination), `to_muxed_id` can be added to the `data` map.
+As specified in the
+[section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination),
+`to_muxed_id` can be added to the `data` map.
#### Create Claimable Balance
+
Emit the following event:
+
```
contract: asset, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-* from is the account being debited.
-* to is the claimable balance being created. The type of this address will be `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE`.
-* amount is the amount moved into the claimable balance.
+- from is the account being debited.
+- to is the claimable balance being created. The type of this address will be
+ `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE`.
+- amount is the amount moved into the claimable balance.
+
+If an asset is a movement from the issuer of the asset, instead emit for the
+movement:
-If an asset is a movement from the issuer of the asset, instead emit for the movement:
```
contract: asset, topics: ["mint", to:Address, sep0011_asset:String], data: amount:i128
```
-As specified in the [section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination), the `to` address will be a claimable balance id, so it does not make sense to add the transaction memo into the event. Therefore, `to_muxed_id` will not be emitted in this case.
+As specified in the
+[section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination),
+the `to` address will be a claimable balance id, so it does not make sense to
+add the transaction memo into the event. Therefore, `to_muxed_id` will not be
+emitted in this case.
#### Claim Claimable Balance
+
Emit the following event:
+
```
contract: asset, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the claimable balance. The type of this address will be `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE`.
-* `to` is the account being credited
-* `amount` is the amount in the claimable balance
-If the claim is a movement to the issuer of the asset, instead emit for the movement:
+- `from` is the claimable balance. The type of this address will be
+ `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE`.
+- `to` is the account being credited
+- `amount` is the amount in the claimable balance
+
+If the claim is a movement to the issuer of the asset, instead emit for the
+movement:
+
```
contract: asset, topics: ["burn", from:Address, sep0011_asset:String], data: amount:i128
```
-As specified in the [section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination), `to_muxed_id` can be added to the `data` map on the `transfer` event. The `destination` on a claimable balance's `claimant` cannot be muxed, so `to_muxed_id` will only be added to the data map if the transaction memo is set.
+As specified in the
+[section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination),
+`to_muxed_id` can be added to the `data` map on the `transfer` event. The
+`destination` on a claimable balance's `claimant` cannot be muxed, so
+`to_muxed_id` will only be added to the data map if the transaction memo is
+set.
#### Liquidity Pool Deposit
+
Emit the following events:
+
```
contract: assetA, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
contract: assetB, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the account being debited.
-* `to` is the liquidity pool being credited. The type of this address will be `SC_ADDRESS_TYPE_LIQUIDITY_POOL`.
-* `amount` is the amount moved into the liquidity pool.
+- `from` is the account being debited.
+- `to` is the liquidity pool being credited. The type of this address will be
+ `SC_ADDRESS_TYPE_LIQUIDITY_POOL`.
+- `amount` is the amount moved into the liquidity pool.
+
+If an asset is a movement from the issuer of the asset, instead emit for the
+movement:
-If an asset is a movement from the issuer of the asset, instead emit for the movement:
```
contract: asset, topics: ["mint", to:Address, sep0011_asset:String], data: amount:i128
```
-As specified in the [section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination), the `to` address will be a liquidity pool id, so it does not make sense to add the transaction memo into the event. Therefore, `to_muxed_id` will not be emitted in this case.
+As specified in the
+[section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination),
+the `to` address will be a liquidity pool id, so it does not make sense to add
+the transaction memo into the event. Therefore, `to_muxed_id` will not be
+emitted in this case.
#### Liquidity Pool Withdraw
+
Emit the following events:
+
```
contract: assetA, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
contract: assetB, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the liquidity pool. The type of this address will be `SC_ADDRESS_TYPE_LIQUIDITY_POOL`.
-* `to` is the account being credited.
-* `amount` is the amount moved out of the liquidity pool.
+- `from` is the liquidity pool. The type of this address will be
+ `SC_ADDRESS_TYPE_LIQUIDITY_POOL`.
+- `to` is the account being credited.
+- `amount` is the amount moved out of the liquidity pool.
+If an asset is issued by the withdrawer, instead emit for the movement of the
+issued asset:
-If an asset is issued by the withdrawer, instead emit for the movement of the issued asset:
```
contract: asset, topics: ["burn", from:Address, sep0011_asset:String], data: amount:i128
```
-As specified in the [section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination), `to_muxed_id` can be added to the `data` map on the `transfer` event.
+As specified in the
+[section to update the transfer and mint events data field](#emit-a-map-as-the-data-field-in-the-transfer-and-mint-event-if-muxed-information-is-being-emitted-for-the-destination),
+`to_muxed_id` can be added to the `data` map on the `transfer` event.
#### Manage Buy Offer / Manage Sell Offer / Create Passive Sell Offer
-Emit two events per offer traded against. Each pair of events represents both sides of a trade. This does mean zero events can be emitted if the resulting offer is not marketable -
+
+Emit two events per offer traded against. Each pair of events represents both
+sides of a trade. This does mean zero events can be emitted if the resulting
+offer is not marketable -
```
contract: assetA, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
contract: assetB, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-If `from` is the issuer on a side of the trade, emit the following instead for that side of the trade:
+If `from` is the issuer on a side of the trade, emit the following instead for
+that side of the trade:
+
```
contract: asset, topics: ["mint", to:Address, sep0011_asset:String], data: amount:i128
```
-If `to` is the issuer on a side of the trade, emit the following instead for that side of the trade:
+If `to` is the issuer on a side of the trade, emit the following instead for
+that side of the trade:
+
```
contract: asset, topics: ["burn", from:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the account being debited (seller).
-* `to` is the account being credited (buyer).
+- `from` is the account being debited (seller).
+- `to` is the account being credited (buyer).
-`to_muxed_id` will not be set for events emitted by any of these offer operations.
+`to_muxed_id` will not be set for events emitted by any of these offer
+operations.
#### Clawback / Clawback Claimable Balance
+
Emit the following event:
```
contract: asset, topics: ["clawback", from:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the account or claimable balance being debited.
-* `amount` is the amount being moved out of the account and burned.
-
+- `from` is the account or claimable balance being debited.
+- `amount` is the amount being moved out of the account and burned.
#### Allow Trust / Set Trustline Flags
-If a trustline has the `AUTHORIZED_FLAG` added or removed, as a result of one of these operations, then a `set_authorized` event should be emitted.
+
+If a trustline has the `AUTHORIZED_FLAG` added or removed, as a result of one
+of these operations, then a `set_authorized` event should be emitted.
```
contract: asset, topics: ["set_authorized", id:Address, sep0011_asset:String], data: authorize:bool
```
-* `id` is the account that the trustline belongs to.
-* `authorize` is a bool that determines if the trustline is now authorized to send and receive payments for this asset.
+- `id` is the account that the trustline belongs to.
+- `authorize` is a bool that determines if the trustline is now authorized to
+ send and receive payments for this asset.
-If either operation is used to revoke authorization from a trustline that deposited into a liquidity pool then claimable balances can be created for the withdrawn assets (See [CAP-0038](cap-0038.md#SetTrustLineFlagsOp-and-AllowTrustOp) for more info). If any claimable balances are created due to this scenario, emit the following event for each claimable balance:
+If either operation is used to revoke authorization from a trustline that
+deposited into a liquidity pool then claimable balances can be created for the
+withdrawn assets (See
+[CAP-0038](cap-0038.md#SetTrustLineFlagsOp-and-AllowTrustOp) for more info). If
+any claimable balances are created due to this scenario, emit the following
+event for each claimable balance:
```
contract: asset, topics: ["transfer", from:Address, to:Address, sep0011_asset:String], data: amount:i128
```
-* `from` is the liquidity pool. The type of this address will be `SC_ADDRESS_TYPE_LIQUIDITY_POOL`.
-* `to` is the claimable balance being created. The type of this address will be `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE`.
-* `amount` is the amount moved into the claimable balance.
+- `from` is the liquidity pool. The type of this address will be
+ `SC_ADDRESS_TYPE_LIQUIDITY_POOL`.
+- `to` is the claimable balance being created. The type of this address will be
+ `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE`.
+- `amount` is the amount moved into the claimable balance.
+
+If an asset in the liquidity pool is being withdrawn for the issuer, then
+no claimable balance will be created for that asset, so instead emit a burn
+event -
-If the an asset in the liquidity pool is being withdrawn for the issuer, then no claimable balance will be created for that asset, so instead emit a burn event -
```
contract: asset, topics: ["burn", from:Address, sep0011_asset:String], data: amount:i128
```
-Due to the fact that `to` is a claimable balance id, we will not emit `to_muxed_id` in the `transfer` event.
+Due to the fact that `to` is a claimable balance id, we will not emit
+`to_muxed_id` in the `transfer` event.
#### Inflation
+
Emit a `mint` event for each inflation winner:
```
contract: asset, topics: ["mint", to:Address, sep0011_asset:String], data: amount:i128
```
-* `to` is one of the winning account addresses.
-* `amount` is the amount of the native asset won.
+- `to` is one of the winning account addresses.
+- `amount` is the amount of the native asset won.
`to_muxed_id` will not be set for events emitted by the Inflation operation.
### Retroactively emitting events
-The events specified above not only need to be emitted going forward, but also on replay for all ledgers from genesis so balances can be built from events. Some Stellar Asset Contract events are also being updated (removing the admin from `mint`/`clawback`, and emitting `mint`/`burn` instead of `transfer` when the issuer if `from` or `to`). Due to the fact that Soroban events are hashed into the ledger, we will continue to emit Stellar Asset Contract events as they were emitted in previous protocols. This will, however, cause issues with rebuilding balances on replay, so there will be a configuration flag added that will result in [V20,V22] events being emitted in the V23 format for the scenarios just mentioned. This does mean that the events can't be used to verify the hash of the events in `InvokeHostFunctionResult`, but this flag will be disabled by default.
-#### Pre-protocol 8 bug
+The events specified above not only need to be emitted going forward, but also
+on replay for all ledgers from genesis so balances can be built from events.
+Some Stellar Asset Contract events are also being updated (removing the admin
+from `mint`/`clawback`, and emitting `mint`/`burn` instead of `transfer` when
+the issuer is `from` or `to`). Due to the fact that Soroban events are hashed
+into the ledger, we will continue to emit Stellar Asset Contract events as they
+were emitted in previous protocols. This will, however, cause issues with
+rebuilding balances on replay, so there will be a configuration flag added that
+will result in [V20,V22] events being emitted in the V23 format for the
+scenarios just mentioned. This does mean that the events can't be used to
+verify the hash of the events in `InvokeHostFunctionResult`, but this flag will
+be disabled by default.
-Another relevant detail for replaying events is that prior to protocol 8, there was a bug that could result in the minting/burning of XLM. To allow for the ability to build balances with only events, we not only need to emit the events specified in this CAP from genesis, but also handle that bug properly.
+#### Pre-protocol 8 bug
-For both the mint and burn scenario, the affected account will be the source account, and that should be the only account in an operation with a balance difference not entirely reflected by the events specified in this CAP. If we take the total diff of XLM in an `Operations` `OperationMeta` (similar to what the `ConservationOfLumens` invariant does) and emit that diff as a mint/burn event for the source account, then consumers should be able to track balances correctly. For every operation, if the source account is found to be affected, then exactly one event will be emitted - either an XLM mint or an XLM burn. To make sure the events are emitted in a way where the balances don't go negative and make sense, any XLM mint should be emitted first in `OperationMetaV2`, and burns should be emitted last.
+Another relevant detail for replaying events is that prior to protocol 8, there
+was a bug that could result in the minting/burning of XLM. To allow for the
+ability to build balances with only events, we not only need to emit the events
+specified in this CAP from genesis, but also handle that bug properly.
+
+For both the mint and burn scenario, the affected account will be the source
+account, and that should be the only account in an operation with a balance
+difference not entirely reflected by the events specified in this CAP. If we
+take the total diff of XLM in an `Operations` `OperationMeta` (similar to what
+the `ConservationOfLumens` invariant does) and emit that diff as a mint/burn
+event for the source account, then consumers should be able to track balances
+correctly. For every operation, if the source account is found to be affected,
+then exactly one event will be emitted - either an XLM mint or an XLM burn. To
+make sure the events are emitted in a way where the balances don't go negative
+and make sense, any XLM mint should be emitted first in `OperationMetaV2`, and
+burns should be emitted last.
+
+If we do identify that the total diff of XLM in an operation is not zero, then
+the operation is expected to be one of the following, along with the extra
+event that will be emitted -
## Design Rationale
### Remove the admin from the SAC `mint` and `clawback` events
-The admin isn't relevant information when a mint or `clawback` occurs, and it hinders compatibility with SEP-41 for when these two events are added to it because the admin is an implementation detail. For a custom token, an admin doesn't need to be a single `Address`, or an admin may not required at all to emit either event.
+The admin isn't relevant information when a mint or `clawback` occurs, and it
+hinders compatibility with SEP-41 for when these two events are added to it
+because the admin is an implementation detail. For a custom token, an admin
+doesn't need to be a single `Address`, or an admin may not be required at all to
+emit either event.
### TransactionMetaV4
-This CAP introduces a new `TransactionMeta` version, `TransactionMetaV4`. Now that we're emitting events for more than just Soroban, this allows us to clean up the structure of meta because `TransactionMetaV3` assumed events would only be emitted for Soroban. This change also allows us to emit events at the operation layer instead of the transaction layer using the new `OperationMetaV2` type. Transaction level events like `fee` will still be emitted at the transaction level under `TransactionMetaV4.events`.
+This CAP introduces a new `TransactionMeta` version, `TransactionMetaV4`. Now
+that we're emitting events for more than just Soroban, this allows us to clean
+up the structure of meta because `TransactionMetaV3` assumed events would only
+be emitted for Soroban. This change also allows us to emit events at the
+operation layer instead of the transaction layer using the new
+`OperationMetaV2` type. Transaction level events like `fee` will still be
+emitted at the transaction level under `TransactionMetaV4.events`.
-It's important to note that transaction meta is not part of the protocol, so the emission of `TransactionMetaV4` instead of `TransactionMetaV3` can be done using a config flag, allowing consumers of meta to switch on their own time.
+It's important to note that transaction meta is not part of the protocol, so
+the emission of `TransactionMetaV4` instead of `TransactionMetaV3` can be done
+using a config flag, allowing consumers of meta to switch on their own time.
### Emit the semantically correct event instead of no longer allowing the issuer to transfer due to missing a trustline
-The Stellar Asset Contract special cases the issuer logic because issuers can't hold a trustline for their own assets. This matches the logic in Classic. The special case was unnecessary however because the Stellar Asset Contract provides the `mint` and `burn` functions. This CAP could instead just remove the special case and allow `transfers` involving the issuer to fail due to a missing trustline,
-but this would break any contracts that rely on this behavior (it's not known at this time if contracts like this exist, but we could check if there are any `transfers` on pubnet that involve the issuer). That's why this CAP chooses to instead emit the correct event in this scenario.
+The Stellar Asset Contract special cases the issuer logic because issuers can't
+hold a trustline for their own assets. This matches the logic in Classic. The
+special case was unnecessary however because the Stellar Asset Contract
+provides the `mint` and `burn` functions. This CAP could instead just remove
+the special case and allow `transfers` involving the issuer to fail due to a
+missing trustline, but this would break any contracts that rely on this
+behavior (it's not known at this time if contracts like this exist, but we
+could check if there are any `transfers` on pubnet that involve the issuer).
+That's why this CAP chooses to instead emit the correct event in this scenario.
### Classic events will not be hashed into the ledger
-For now, the new events being added for the non-soroban operations will not be hashed into the ledger to give us more flexibility while we figure out if we want to transform more of the meta ledger entry changes into events. We can start hashing the events at a later point. Soroban events will continued to be hashed as they are in protocol 22.
+For now, the new events being added for the non-soroban operations will not be
+hashed into the ledger to give us more flexibility while we figure out if we
+want to transform more of the meta ledger entry changes into events. We can
+start hashing the events at a later point. Soroban events will continue to be
+hashed as they are in protocol 22.
### New SCAddressType types
-This CAP adds two new `SCAddressType` types - `SC_ADDRESS_TYPE_CLAIMABLE_BALANCE` and `SC_ADDRESS_TYPE_LIQUIDITY_POOL`. These types are used in the topic of an event where the address is not a contract or a stellar account.
+This CAP adds two new `SCAddressType` types -
+`SC_ADDRESS_TYPE_CLAIMABLE_BALANCE` and `SC_ADDRESS_TYPE_LIQUIDITY_POOL`. These
+types are used in the topic of an event where the address is not a contract or
+a stellar account.
### Order of precedence for `to_muxed_id` on the `transfer` and `mint` events
-The `transfer` and `mint` event can emit muxed information for `to`, where the transaction memo will be forwarded to `to_muxed_id` for classic if the destination is not muxed, but the CAP has specified that if the destination is muxed and the transaction memo is set for classic events, then we set `to_muxed_id` using an order of precedence. We use the destinations muxed information over the transaction memo. The alternative was to also emit a transaction level `tx_memo` event, but we determined that this was not necessary as setting both a muxed destination account and a tx memo is an edge case without a relevant use case. If a consumer wants the tx memo as well, they can just look for it in the transaction.
+The `transfer` and `mint` events can emit muxed information for `to`, where the
+transaction memo will be forwarded to `to_muxed_id` for classic if the
+destination is not muxed, but the CAP has specified that if the destination is
+muxed and the transaction memo is set for classic events, then we set
+`to_muxed_id` using an order of precedence. We use the destination's muxed
+information over the transaction memo. The alternative was to also emit a
+transaction level `tx_memo` event, but we determined that this was not
+necessary as setting both a muxed destination account and a tx memo is an edge
+case without a relevant use case. If a consumer wants the tx memo as well, they
+can just look for it in the transaction.
### No diagnostics in OperationMetaV2
-We currently clear all `OperationMeta` when a transaction fails, but with the way `OperationMetaV2` is setup in the CAP, we wouldn't be able to do that if there was a diagnostics vector inside `OperationMetaV2` is used. We would have to keep around each `OperationMetaV2` object, and clear the non diagnostic internals. Instead of making this change that downstream consumers may not expect, using the transaction level diagnostics vector for all diagnostics is sufficient. Classic operations won't emit any diagnostics in V23, and if we want them to in the future, we can just modify the diagnostic events to contain the operation id if that information is needed.
+We currently clear all `OperationMeta` when a transaction fails, but with the
+way `OperationMetaV2` is set up in the CAP, we wouldn't be able to do that if
+a diagnostics vector inside `OperationMetaV2` were used. We would have
+to keep around each `OperationMetaV2` object, and clear the non diagnostic
+internals. Instead of making this change that downstream consumers may not
+expect, using the transaction level diagnostics vector for all diagnostics is
+sufficient. Classic operations won't emit any diagnostics in V23, and if we
+want them to in the future, we can just modify the diagnostic events to contain
+the operation id if that information is needed.
## Protocol Upgrade Transition
-On the protocol upgrade, the SAC will start emitting the `mint` and `clawback` events without the `admin` topic. Also, the `transfer` event will not be emitted for `transfers` involving the issuer. Instead, the appropriate `mint`/`burn` will be emitted.
-The unified events will not be part of the protocol, so they can be enabled with a configuration flag at anytime.
+On the protocol upgrade, the SAC will start emitting the `mint` and `clawback`
+events without the `admin` topic. Also, the `transfer` event will not be
+emitted for `transfers` involving the issuer. Instead, the appropriate
+`mint`/`burn` will be emitted.
-### Backwards Incompatibilities
+The unified events will not be part of the protocol, so they can be enabled
+with a configuration flag at any time.
+### Backwards Incompatibilities
### Resource Utilization
-The additional events will use more resources if a node chooses to emit them.
+The additional events will use more resources if a node chooses to emit them.
## Security Concerns
-
## Future work
-
## Test Cases
-
## Implementation
diff --git a/core/cap-0068.md b/core/cap-0068.md
index 5f9726086..fabe37f98 100644
--- a/core/cap-0068.md
+++ b/core/cap-0068.md
@@ -15,7 +15,8 @@ Protocol version: 23
## Simple Summary
-This is a CAP describing a new Soroban host function for getting the executable corresponding to an `Address`.
+This is a CAP describing a new Soroban host function for getting the executable
+corresponding to an `Address`.
## Working Group
@@ -23,27 +24,46 @@ As described in the preamble section.
## Motivation
-There is currently no way to get the executable for an arbitrary address, so it's not possible to distinguish between Wasm and built-in contracts (currently, the only such contract is Stellar Asset contract) and not possible to get the specific Wasm hash corresponding to the contract. This information serves multiple different use cases, such as:
-- Custom accounts will be able to provide authorization policies based on the executables, for example, only allow a few vetted token implementations to be used.
-- Contracts will be able to generically distinguish between SAC instances and custom tokens on-chain. Currently possible for a custom token to impersonate metadata of any SAC, including the one for the 'native' token (XLM) and there is no on-chain way to distinguish between these. This is especially relevant for the bridge protocols that provide a capability to wrap Stellar tokens on different chains and need to create metadata for the wrapped tokens.
-- In general contracts will be able to 'pin' the exact implementations of their dependencies. While this is not necessary (or even desired) for majority of the protocols, there are use cases where pinning increases security. For example, this allows custom accounts to give restricted authorization privileges to the separate 'session' contracts without worrying about the change in the implementation of these contracts.
-
+There is currently no way to get the executable for an arbitrary address, so
+it's not possible to distinguish between Wasm and built-in contracts
+(currently, the only such contract is Stellar Asset contract) and not possible
+to get the specific Wasm hash corresponding to the contract. This information
+serves multiple different use cases, such as:
+
+- Custom accounts will be able to provide authorization policies based on the
+ executables, for example, only allow a few vetted token implementations to be
+ used.
+- Contracts will be able to generically distinguish between SAC instances and
+ custom tokens on-chain. It is currently possible for a custom token to impersonate
+ metadata of any SAC, including the one for the 'native' token (XLM) and there
+ is no on-chain way to distinguish between these. This is especially relevant
+ for the bridge protocols that provide a capability to wrap Stellar tokens on
+ different chains and need to create metadata for the wrapped tokens.
+- In general contracts will be able to 'pin' the exact implementations of their
+ dependencies. While this is not necessary (or even desired) for majority of
+ the protocols, there are use cases where pinning increases security. For
+ example, this allows custom accounts to give restricted authorization
+ privileges to the separate 'session' contracts without worrying about the
+ change in the implementation of these contracts.
### Goals Alignment
+
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects
- to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
-A new host functions that returns an executable for the provided `Address` is added to the Soroban host.
+A new host function that returns an executable for the provided `Address` is
+added to the Soroban host.
## Specification
### New host function
-The diff is based on commit `822727b37b7ef2eea1fc0bafc558820dc450c67e` of `rs-soroban-env`.
+The diff is based on commit `822727b37b7ef2eea1fc0bafc558820dc450c67e` of
+`rs-soroban-env`.
```diff mddiffcheck.ignore=true
soroban-env-common/env.json | 40 ++++++++++++++++++++++++++++++++++++-
@@ -77,7 +97,9 @@ index d421dca2..41bc7e47 100644
### Semantics
-`get_address_executable` host function will be added. This function returns the value of type `Option` where `AddressExecutable` is defined as follows:
+`get_address_executable` host function will be added. This function returns the
+value of type `Option` where `AddressExecutable` is defined
+as follows:
```rust
#[contracttype]
@@ -90,17 +112,25 @@ enum AddressExecutable {
The semantics of the return value are as follows:
-- If there is no ledger entry for the provided address in the storage `None` (i.e. `Val::VOID`) is returned
- - 'No ledger entry' means no respective `AccountEntry` for the account (`ScAddress::Account`) addresses and no respective `ContractData` (with `ScVal::LedgerKeyContractInstance` key) entry for contract addresses (`ScAddress::Contract`)
+- If there is no ledger entry for the provided address in the storage `None`
+ (i.e. `Val::VOID`) is returned
+ - 'No ledger entry' means no respective `AccountEntry` for the account
+ (`ScAddress::Account`) addresses and no respective `ContractData` (with
+ `ScVal::LedgerKeyContractInstance` key) entry for contract addresses
+ (`ScAddress::Contract`)
- If there is a ledger entry, then it is converted to the `AddressExecutable`:
- - Contracts are represented depending on their executable type:
- - Wasm contracts are represented by `AddressExecutable::Wasm` with the corresponding SHA-256 hash of the underlying Wasm code
- - Stellar asset contract instances are represented by `AddressExecutable::StellarAsset`
- - Accounts are represented as `AddressExecutable::Account`
+ - Contracts are represented depending on their executable type:
+ - Wasm contracts are represented by `AddressExecutable::Wasm` with the
+ corresponding SHA-256 hash of the underlying Wasm code
+ - Stellar asset contract instances are represented by
+ `AddressExecutable::StellarAsset`
+ - Accounts are represented as `AddressExecutable::Account`
## Protocol Upgrade Transition
-The proposed host function will use the standard mechanism for protocol-gating the host functions and will become available in protocol 23. Before protocol 23 it will be impossible to upload Wasm that uses the new functions.
+The proposed host function will use the standard mechanism for protocol-gating
+the host functions and will become available in protocol 23. Before protocol 23
+it will be impossible to upload Wasm that uses the new functions.
### Backwards Incompatibilities
@@ -108,11 +138,15 @@ This CAP does not introduce any backward incompatibilities.
### Resource Utilization
-The new host function will have the appropriate metering. No new cost types need to be introduced, as the operations can lean on the existing metering primitives.
+The new host function will have the appropriate metering. No new cost types
+need to be introduced, as the operations can lean on the existing metering
+primitives.
## Security Concerns
-`get_address_executable` gives contracts access to data that has previously been unavailable. However, there is no obvious way to abuse this data, as it does not reveal the internal state of the contract.
+`get_address_executable` gives contracts access to data that has previously
+been unavailable. However, there is no obvious way to abuse this data, as it
+does not reveal the internal state of the contract.
## Test Cases
diff --git a/core/cap-0069.md b/core/cap-0069.md
index 410575c08..b9fd62342 100644
--- a/core/cap-0069.md
+++ b/core/cap-0069.md
@@ -14,7 +14,9 @@ Protocol version: 23
```
## Simple Summary
-This is a CAP describing new Soroban host functions for conversion between `String` and `Bytes` types in Soroban.
+
+This is a CAP describing new Soroban host functions for conversion between
+`String` and `Bytes` types in Soroban.
## Working Group
@@ -22,26 +24,38 @@ As described in the preamble section.
## Motivation
-`String` host type is basically an alias to the `Bytes` type as it doesn't enforce any specific encoding to be used. However, the only way to convert between the two is to allocate a buffer of arbitrary length in the linear memory of the contract, which is rather inefficient as it requires the contract to link the allocator library which might be otherwise not necessary. Conversion between the compatible types is a generally useful feature to provide (e.g. for connecting different contract APIs), but the additional motivation in this is case is also the fact that the set of the host functions defined for `Bytes` is much broader than the one for `String`.
-
+`String` host type is basically an alias to the `Bytes` type as it doesn't
+enforce any specific encoding to be used. However, the only way to convert
+between the two is to allocate a buffer of arbitrary length in the linear
+memory of the contract, which is rather inefficient as it requires the contract
+to link the allocator library which might be otherwise not necessary.
+Conversion between the compatible types is a generally useful feature to
+provide (e.g. for connecting different contract APIs), but the additional
+motivation in this case is also the fact that the set of the host functions
+defined for `Bytes` is much broader than the one for `String`.
### Goals Alignment
+
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects
- to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
+
The following new host functions are provided:
-- A function that converts a `BytesObject` into `StringObject` with the same contents
-- A function that converts a `StringObject` into `BytesObject` with the same contents
+- A function that converts a `BytesObject` into `StringObject` with the same
+ contents
+- A function that converts a `StringObject` into `BytesObject` with the same
+ contents
## Specification
### New host functions
-The diff is based on commit `822727b37b7ef2eea1fc0bafc558820dc450c67e` of `rs-soroban-env`.
+The diff is based on commit `822727b37b7ef2eea1fc0bafc558820dc450c67e` of
+`rs-soroban-env`.
```diff mddiffcheck.ignore=true
soroban-env-common/env.json | 40 ++++++++++++++++++++++++++++++++++++-
@@ -90,13 +104,20 @@ index d421dca2..41bc7e47 100644
### Semantics
-`string_to_bytes`/`bytes_to_string` functions will be added to Soroban host. These functions perform the `StringObject`<->`BytesObject` conversions. Both allocate a new object of the respective output type from the underlying bytes buffer without performing any additional validation.
+`string_to_bytes`/`bytes_to_string` functions will be added to Soroban host.
+These functions perform the `StringObject`<->`BytesObject` conversions. Both
+allocate a new object of the respective output type from the underlying bytes
+buffer without performing any additional validation.
-While there is room for optimization here via using the shared bytes buffer, this CAP doesn't go for it as the benefit is too marginal compared to the necessary effort.
+While there is room for optimization here via using the shared bytes buffer,
+this CAP doesn't go for it as the benefit is too marginal compared to the
+necessary effort.
## Protocol Upgrade Transition
-The proposed host functions will use the standard mechanism for protocol-gating the host functions and will become available in protocol 23. Before protocol 23 it will be impossible to upload Wasm that uses the new functions.
+The proposed host functions will use the standard mechanism for protocol-gating
+the host functions and will become available in protocol 23. Before protocol 23
+it will be impossible to upload Wasm that uses the new functions.
### Backwards Incompatibilities
@@ -104,7 +125,9 @@ This CAP does not introduce any backward incompatibilities.
### Resource Utilization
-The new host functions will have the appropriate metering. No new cost types need to be introduced, as the operations can lean on the existing metering primitives.
+The new host functions will have the appropriate metering. No new cost types
+need to be introduced, as the operations can lean on the existing metering
+primitives.
## Security Concerns
diff --git a/core/cap-0070.md b/core/cap-0070.md
index 6be041641..16f9c6c25 100644
--- a/core/cap-0070.md
+++ b/core/cap-0070.md
@@ -15,20 +15,28 @@ Protocol version: 23
## Simple Summary
-This CAP introduces ledger configuration settings allowing the Stellar network to dynamically adjust ledger close times, nomination timeouts,
-and ballot timeouts to facilitate controlled, incremental improvements to block time performance.
+This CAP introduces ledger configuration settings allowing the Stellar network
+to dynamically adjust ledger close times, nomination timeouts, and ballot
+timeouts to facilitate controlled, incremental improvements to block time
+performance.
## Motivation
-Currently, Stellar's ledger close time and SCP timeout settings are hardcoded. This restricts the network's ability to gradually test and implement
-shorter block times, limiting improvements in performance and responsiveness. Additionally, as other network parameters such as ledger limits and transaction
-throughput evolve, SCP nomination and ballot phase timings may need to be adjusted to maintain efficiency and resilience.
+Currently, Stellar's ledger close time and SCP timeout settings are hardcoded.
+This restricts the network's ability to gradually test and implement shorter
+block times, limiting improvements in performance and responsiveness.
+Additionally, as other network parameters such as ledger limits and transaction
+throughput evolve, SCP nomination and ballot phase timings may need to be
+adjusted to maintain efficiency and resilience.
-This CAP addresses these issues by making these parameters configurable, enabling small, incremental adjustments and performance tuning.
+This CAP addresses these issues by making these parameters configurable,
+enabling small, incremental adjustments and performance tuning.
## Goals Alignment
-The proposal aligns with Stellar’s goals of scalability, resilience, and performance by enabling iterative improvements in ledger close time and consensus efficiency.
+The proposal aligns with Stellar’s goals of scalability, resilience, and
+performance by enabling iterative improvements in ledger close time and
+consensus efficiency.
## Abstract
@@ -40,7 +48,8 @@ We define new network configuration settings for:
- Per-round nomination timeout increment (milliseconds)
- Per-round ballot timeout increment (milliseconds)
-These settings are introduced as ledger config parameters, allowing the Stellar network to adjust consensus timings incrementally.
+These settings are introduced as ledger config parameters, allowing the Stellar
+network to adjust consensus timings incrementally.
## Specification
@@ -54,7 +63,7 @@ index 30e41ad..5fc19f3 100644
@@ -302,6 +302,14 @@ struct EvictionIterator {
uint64 bucketFileOffset;
};
-
+
+struct ConfigSettingSCPTiming {
+ uint32 ledgerTargetCloseTimeMilliseconds;
+ uint32 nominationTimeoutInitialMilliseconds;
@@ -65,7 +74,7 @@ index 30e41ad..5fc19f3 100644
+
// limits the ContractCostParams size to 20kB
const CONTRACT_COST_COUNT_LIMIT = 1024;
-
+
@@ -325,7 +333,8 @@ enum ConfigSettingID
CONFIG_SETTING_LIVE_SOROBAN_STATE_SIZE_WINDOW = 12,
CONFIG_SETTING_EVICTION_ITERATOR = 13,
@@ -74,7 +83,7 @@ index 30e41ad..5fc19f3 100644
+ CONFIG_SETTING_CONTRACT_LEDGER_COST_EXT_V0 = 15,
+ CONFIG_SETTING_SCP_TIMING = 16
};
-
+
union ConfigSettingEntry switch (ConfigSettingID configSettingID)
@@ -362,5 +371,7 @@ case CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0:
ConfigSettingContractParallelComputeV0 contractParallelCompute;
@@ -88,33 +97,36 @@ index 30e41ad..5fc19f3 100644
## Semantics
-All initial values will match the current hardcoded values. The initial upgrade to protocol 23 should
-have no actual effect on SCP, but will open the door for future changes.
+All initial values will match the current hardcoded values. The initial upgrade
+to protocol 23 should have no actual effect on SCP, but will open the door for
+future changes.
-Note: All ranges are approximate values for sanity checking, but are untested. Actual upgrades should be within
-these ranges, but must still be tested and validated.
+Note: All ranges are approximate values for sanity checking, but are untested.
+Actual upgrades should be within these ranges, but must still be tested and
+validated.
- **ledgerTargetCloseTimeMilliseconds**
- - Target ledger close time in milliseconds. Validators will use this value to set the nextLedgerTrigger timer.
- - Initial value: 5000
- - Range: [4000, 5000]
+ - Target ledger close time in milliseconds. Validators will use this value to
+ set the nextLedgerTrigger timer.
+ - Initial value: 5000
+ - Range: [4000, 5000]
- **nominationTimeoutInitialMilliseconds**
- - Initial timeout for SCP nomination phase in milliseconds.
- - Initial value: 1000
- - Range: [750, 2500]
+ - Initial timeout for SCP nomination phase in milliseconds.
+ - Initial value: 1000
+ - Range: [750, 2500]
- **nominationTimeoutIncrementMilliseconds**
- - Incremental timeout increase per additional nomination round in milliseconds.
- - Initial value: 1000
- - Range: [500, 2000]
+ - Incremental timeout increase per additional nomination round in
+ milliseconds.
+ - Initial value: 1000
+ - Range: [500, 2000]
- **ballotTimeoutInitialMilliseconds**
- - Initial timeout for SCP ballot phase in milliseconds
- - Initial value: 1000
- - Range: [750, 2500]
-- **ballotTimeoutIncrementMilliseconds**
- - Incremental timeout increase per additional ballot round in milliseconds
- - Initial value: 1000
- - Range: [500, 2000]
-For some ballot/nomination round `i`, the timeout for that round is calculated as:
+ - Initial timeout for SCP ballot phase in milliseconds
+ - Initial value: 1000
+ - Range: [750, 2500]
+- **ballotTimeoutIncrementMilliseconds** - Incremental timeout increase per
+  additional ballot round in milliseconds - Initial value: 1000 - Range: [500, 2000]
+
+For some ballot/nomination round `i`, the timeout for that round is calculated as:
```
round_timeout(i) = initial_timeout + (i * increment_timeout)
@@ -122,46 +134,73 @@ round_timeout(i) = initial_timeout + (i * increment_timeout)
## Design Rationale
-Introducing configurable consensus parameters provides essential flexibility, allowing the network to incrementally test and adopt shorter ledger
-close times without large, disruptive protocol updates. While significant changes, such as going from 5 seconds to 2.5 seconds, will require significant
-protocol work, smaller latency gains can be more easily achieved. While the network of Stellar validators may be able to achieve shorter block times,
-downstream systems may assume a consistent 5 second block time or not be performant enough to handle shorter block times. By making gradual changes via
-SLP instead of protocol upgrades, the network can slowly test small changes and adapt over time to faster ledgers.
-
-When changing `ledgerTargetCloseTimeMilliseconds`, corresponding adjustments to smart contract ledger limits must be made proportionally.
-This ensures consistent resource utilization per second, preventing overload and maintaining network stability. For example, if ledger close times decrease by 20%,
-ledger limits must also decrease by approximately 20% to keep resource usage constant. This proportional adjustment is crucial to avoid excessive validator workloads
+Introducing configurable consensus parameters provides essential flexibility,
+allowing the network to incrementally test and adopt shorter ledger close times
+without large, disruptive protocol updates. While significant changes, such as
+going from 5 seconds to 2.5 seconds, will require significant protocol work,
+smaller latency gains can be more easily achieved. While the network of Stellar
+validators may be able to achieve shorter block times, downstream systems may
+assume a consistent 5 second block time or not be performant enough to handle
+shorter block times. By making gradual changes via SLP instead of protocol
+upgrades, the network can slowly test small changes and adapt over time to
+faster ledgers.
+
+When changing `ledgerTargetCloseTimeMilliseconds`, corresponding adjustments to
+smart contract ledger limits must be made proportionally. This ensures
+consistent resource utilization per second, preventing overload and maintaining
+network stability. For example, if ledger close times decrease by 20%, ledger
+limits must also decrease by approximately 20% to keep resource usage constant.
+This proportional adjustment is crucial to avoid excessive validator workloads
or performance degradation as block times decrease.
-As block times decrease, or smart contract limits increase, nomination and ballot timeouts may need to be adjusted for optimal performance. Increased performance
-does not necessarily mean that timeouts should be decreased. For example, shortening the nomination timeout can lead to more timeouts if there are slower nodes
-on the network. These additional nomination timeouts cause more bandwidth and CPU load on the network, which can further degrade slow nodes. This results in
-a "snowball" effect, where the network becomes more and more congested and slow nodes fall further and further behind. As TPS increases and transaction set size
-increases, longer nomination timeouts may actually improve network performance and decrease overall block times, as less values are nominated. While longer
-nomination timeouts can increase network latency on average, they can also cause larger latency spikes if certain nodes are offline or slow.
-
-Given these complexities, it is vital that timeout adjustments be incrementally tested and validated through simulation to identify safe and optimal settings.
+As block times decrease, or smart contract limits increase, nomination and
+ballot timeouts may need to be adjusted for optimal performance. Increased
+performance does not necessarily mean that timeouts should be decreased. For
+example, shortening the nomination timeout can lead to more timeouts if there
+are slower nodes on the network. These additional nomination timeouts cause
+more bandwidth and CPU load on the network, which can further degrade slow
+nodes. This results in a "snowball" effect, where the network becomes more and
+more congested and slow nodes fall further and further behind. As TPS increases
+and transaction set size increases, longer nomination timeouts may actually
+improve network performance and decrease overall block times, as fewer values
+are nominated. While longer nomination timeouts can increase network latency on
+average, they can also cause larger latency spikes if certain nodes are offline
+or slow.
+
+Given these complexities, it is vital that timeout adjustments be incrementally
+tested and validated through simulation to identify safe and optimal settings.
Only small, gradual changes should be proposed.
### Config Setting Limits
-Unlike Soroban TX limit values, SCP timing values are nuanced and complicated to test. Additionally, a
-bad value could cause significant harm to the network, such as a network wide halt, out of sync downstream systems, etc.
-To prevent a bad value being introduced by mistake or by an uninformed proposal, SCP timing values will be limited to
-a tight range at the implementation level. As the Stellar Network becomes more efficient, these ranges can be changed
-on protocol boundaries, i.e. for protocol 23 `ledgerTargetCloseTimeMilliseconds` can only be reduced to 4000, but a
-future protocol may lower this limit further. It is important for these limits to change on protocol boundaries.
-Should a limit be reduced in a minor point release, it is possible for an SCP timing upgrade to occur for older nodes
-on the network that are not capable of keeping up with these new timings. Note that these limits are untested sanity
-checks and may still cause harm to the network. However, these bounds appear tight enough to not cause substantial
-network failure.
+Unlike Soroban TX limit values, SCP timing values are nuanced and complicated
+to test. Additionally, a bad value could cause significant harm to the network,
+such as a network wide halt, out of sync downstream systems, etc. To prevent a
+bad value being introduced by mistake or by an uninformed proposal, SCP timing
+values will be limited to a tight range at the implementation level. As the
+Stellar Network becomes more efficient, these ranges can be changed on protocol
+boundaries, i.e. for protocol 23 `ledgerTargetCloseTimeMilliseconds` can only
+be reduced to 4000, but a future protocol may lower this limit further. It is
+important for these limits to change on protocol boundaries. Should a limit be
+reduced in a minor point release, it is possible for an SCP timing upgrade to
+occur for older nodes on the network that are not capable of keeping up with
+these new timings. Note that these limits are untested sanity checks and may
+still cause harm to the network. However, these bounds appear tight enough to
+not cause substantial network failure.
## Test Cases
-For the implementation of this CAP, no testing will be required outside of unit tests, as the default values will be identical to the current hardcoded values.
-However, changes to these configs should be thoroughly tested via [supercluster simulation](https://github.com/stellar/supercluster). When it comes to testing
-network timing changes, it is important to test on large topologies that are most similar to the mainnet topology with installed latency to simulate slow nodes.
-This can be achieved via the "SimulatePubnet" and "SimulatePubnetMixedLoad" missions. On an ideal network, where all nodes are fast, it is likely that
-shorter timeouts will improve network performance. However, on a network with slow nodes, shorter timeouts may cause more values to be nominated, resulting
-in more work for all validators on the network and decreased performance. Network performance can be very sensitive to these timing parameters, so it is important
-to propose small, gradual changes that have been thoroughly simulated.
+For the implementation of this CAP, no testing will be required outside of unit
+tests, as the default values will be identical to the current hardcoded values.
+However, changes to these configs should be thoroughly tested via
+[supercluster simulation](https://github.com/stellar/supercluster). When it
+comes to testing network timing changes, it is important to test on large
+topologies that are most similar to the mainnet topology with installed latency
+to simulate slow nodes. This can be achieved via the "SimulatePubnet" and
+"SimulatePubnetMixedLoad" missions. On an ideal network, where all nodes are
+fast, it is likely that shorter timeouts will improve network performance.
+However, on a network with slow nodes, shorter timeouts may cause more values
+to be nominated, resulting in more work for all validators on the network and
+decreased performance. Network performance can be very sensitive to these
+timing parameters, so it is important to propose small, gradual changes that
+have been thoroughly simulated.
diff --git a/core/cap-0071.md b/core/cap-0071.md
index ee77c6e3b..2920eec02 100644
--- a/core/cap-0071.md
+++ b/core/cap-0071.md
@@ -4,7 +4,7 @@ Title: Authentication delegation for custom accounts
Working Group:
Owner: Dmytro Kozhevin <@dmkozh>
Authors: Dmytro Kozhevin <@dmkozh>
- Consulted:
+ Consulted:
Status: Draft
Created: 2025-09-10
Discussion: https://github.com/orgs/stellar/discussions/1784
@@ -13,7 +13,8 @@ Protocol version: TBD
## Simple Summary
-Add a built-in way for custom (smart contract) accounts to delegate the authentication logic to other addresses.
+Add a built-in way for custom (smart contract) accounts to delegate the
+authentication logic to other addresses.
## Working Group
@@ -21,42 +22,90 @@ As specified in the Preamble.
## Motivation
-Custom (smart contract-based) accounts on Stellar had the capability to delegate their authentication logic to a different address starting from their introduction in protocol 20. This can be achieved via calling `require_auth_for_args` for an `Address` (or multiple addresses) inside the `__check_auth` function of the custom account instead of, or in addition to, performing signature validation. The authentication would then be requested from these addresses, thus potentially triggering their `__check_auth` functions.
-
-This feature has been experimented with in some custom account implementations, and is generally deemed useful, as it allows for creating more flexible and 'modular' custom accounts. A significant motivating use case for the authentication delegation is also introduced in [CAP-72](./cap-0072.md): it adds the authentication logic customization capability to the Stellar accounts via adding contracts as additional account signers. This is the same case of authentication delegation, with the only difference being that the authentication procedure for the Stellar accounts is built into the protocol.
-
- However, since the current delegation support happens to be just a by-product of the authorization framework design, there are significant user and developer experience flaws with the approach:
-
-- It is hard to properly simulate delegated authorization. When simulation records the necessary authorization payloads, it doesn't ever enter `__check_auth`, as it doesn't have sufficient information to call it. Thus users need to build the authorization payload of the inner call themselves and then they also need to run simulation again.
- - This could be somewhat simplified on the simulation side by introducing a mode in which authorization data may be partially initialized, which would simplify the inner payload building. However, this approach still comes with complexity for developers (such as the need to do more simulation calls), as well as the added simulation complexity.
-- Every account for which `__check_auth` is called requires an authorization entry in the transaction, with its own nonce. That increases transaction cost and further increases complexity.
-- It is hard to forward the authorization context to the delegated signers. To do that, one needs to pass the whole context into the `require_auth_for_args` call. But that means that the whole context needs to be present in the authorization payload of the delegated signer, which bloats the transaction size unnecessarily, and increases the complexity for the developers if they want to write logic dependent on the context.
-
-Most of these issues, besides double simulation, can be mitigated with a rather small protocol change proposed in the CAP.
+Custom (smart contract-based) accounts on Stellar had the capability to
+delegate their authentication logic to a different address starting from their
+introduction in protocol 20. This can be achieved via calling
+`require_auth_for_args` for an `Address` (or multiple addresses) inside the
+`__check_auth` function of the custom account instead of, or in addition to,
+performing signature validation. The authentication would then be requested
+from these addresses, thus potentially triggering their `__check_auth`
+functions.
+
+This feature has been experimented with in some custom account implementations,
+and is generally deemed useful, as it allows for creating more flexible and
+'modular' custom accounts. A significant motivating use case for the
+authentication delegation is also introduced in [CAP-72](./cap-0072.md): it
+adds the authentication logic customization capability to the Stellar accounts
+via adding contracts as additional account signers. This is the same case of
+authentication delegation, with the only difference being that the
+authentication procedure for the Stellar accounts is built into the protocol.
+
+However, since the current delegation support happens to be just a by-product
+of the authorization framework design, there are significant user and developer
+experience flaws with the approach:
+
+- It is hard to properly simulate delegated authorization. When simulation
+ records the necessary authorization payloads, it doesn't ever enter
+ `__check_auth`, as it doesn't have sufficient information to call it. Thus
+ users need to build the authorization payload of the inner call themselves
+ and then they also need to run simulation again.
+ - This could be somewhat simplified on the simulation side by introducing a
+ mode in which authorization data may be partially initialized, which would
+ simplify the inner payload building. However, this approach still comes
+ with complexity for developers (such as the need to do more simulation
+ calls), as well as the added simulation complexity.
+- Every account for which `__check_auth` is called requires an authorization
+ entry in the transaction, with its own nonce. That increases transaction cost
+ and further increases complexity.
+- It is hard to forward the authorization context to the delegated signers. To
+ do that, one needs to pass the whole context into the `require_auth_for_args`
+ call. But that means that the whole context needs to be present in the
+ authorization payload of the delegated signer, which bloats the transaction
+ size unnecessarily, and increases the complexity for the developers if they
+ want to write logic dependent on the context.
+
+Most of these issues, besides double simulation, can be mitigated with a rather
+small protocol change proposed in the CAP.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
-A new authorization host function `delegate_account_auth` is introduced for delegating the authentication logic from an `Address`es `__check_auth` function to a different address. Also, a new type of Soroban authorization credentials is introduced that allows specifying the delegate addresses together with their signatures.
+A new authorization host function `delegate_account_auth` is introduced for
+delegating the authentication logic from an `Address`'s `__check_auth` function
+to a different address. Also, a new type of Soroban authorization credentials
+is introduced that allows specifying the delegate addresses together with their
+signatures.
-Each of the delegated addresses has its own signature and its own authentication logic defined by the address executable. However, the signature payload and authorization context are shared among all the signers and are equivalent to the signature payload and the context of the 'top-level' address to which the authorization entry belongs.
+Each of the delegated addresses has its own signature and its own
+authentication logic defined by the address executable. However, the signature
+payload and authorization context are shared among all the signers and are
+equivalent to the signature payload and the context of the 'top-level' address
+to which the authorization entry belongs.
-`delegate_account_auth` calls can also be nested recursively, and every authentication call will still inherit the 'top-level' arguments.
+`delegate_account_auth` calls can also be nested recursively, and every
+authentication call will still inherit the 'top-level' arguments.
-Another host function called `get_delegated_signers_for_current_auth_check` is also introduced for standardizing and simplifying the delegated signers access by providing the direct access to the delegated signers populated in the authorization payload.
+Another host function called `get_delegated_signers_for_current_auth_check` is
+also introduced for standardizing and simplifying the delegated signers access
+by providing the direct access to the delegated signers populated in the
+authorization payload.
-With these changes only a single authorization entry is necessary to accommodate an arbitrary number of potentially nested delegated signers, which reduces the transaction size greatly and simplifies the simulation logic.
+With these changes only a single authorization entry is necessary to
+accommodate an arbitrary number of potentially nested delegated signers, which
+reduces the transaction size greatly and simplifies the simulation logic.
## Specification
### XDR changes
-This patch of XDR changes is based on the XDR files in commit `4b7a2ef7931ab2ca2499be68d849f38190b443ca` of stellar-xdr.
+This patch of XDR changes is based on the XDR files in commit
+`4b7a2ef7931ab2ca2499be68d849f38190b443ca` of stellar-xdr.
```diff mddiffcheck.ignore=true
diff --git a/Stellar-ledger-entries.x b/Stellar-ledger-entries.x
@@ -71,7 +120,7 @@ index b9a9a16..348311c 100644
+ ENVELOPE_TYPE_SOROBAN_AUTHORIZATION = 9,
+ ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_WITH_ADDRESS = 10
};
-
+
enum BucketListType
diff --git a/Stellar-transaction.x b/Stellar-transaction.x
index 9a14d6e..d59d1d5 100644
@@ -80,7 +129,7 @@ index 9a14d6e..d59d1d5 100644
@@ -569,10 +569,23 @@ struct SorobanAddressCredentials
SCVal signature;
};
-
+
+struct SorobanDelegateSignature {
+ SCAddress address;
+ SCVal signature;
@@ -100,16 +149,16 @@ index 9a14d6e..d59d1d5 100644
+ SOROBAN_CREDENTIALS_ADDRESS = 1,
+ SOROBAN_CREDENTIALS_ADDRESS_WITH_DELEGATES = 2
};
-
+
union SorobanCredentials switch (SorobanCredentialsType type)
@@ -581,6 +594,8 @@ case SOROBAN_CREDENTIALS_SOURCE_ACCOUNT:
void;
case SOROBAN_CREDENTIALS_ADDRESS:
SorobanAddressCredentials address;
-+ case SOROBAN_CREDENTIALS_ADDRESS_WITH_DELEGATES:
++ case SOROBAN_CREDENTIALS_ADDRESS_WITH_DELEGATES:
+ SorobanAddressCredentialsWithDelegates addressWithDelegates;
};
-
+
/* Unit of authorization data for Soroban.
@@ -731,6 +746,15 @@ case ENVELOPE_TYPE_SOROBAN_AUTHORIZATION:
uint32 signatureExpirationLedger;
@@ -125,13 +174,15 @@ index 9a14d6e..d59d1d5 100644
+ SorobanAuthorizedInvocation invocation;
+ } sorobanAuthorization;
};
-
+
enum MemoType
```
### New host functions
-The diff is based on commit `f5153ff476ce7e68ee0bda278188cded135ff301` of `rs-soroban-env`. Min supported protocol is intentionally not specified as this CAP's protocol version is TBD.
+The diff is based on commit `f5153ff476ce7e68ee0bda278188cded135ff301` of
+`rs-soroban-env`. Min supported protocol is intentionally not specified as this
+CAP's protocol version is TBD.
```diff mddiffcheck.ignore=true
diff --git a/soroban-env-common/env.json b/soroban-env-common/env.json
@@ -170,67 +221,142 @@ index 50293483..269862fc 100644
#### `SorobanAddressCredentialsWithDelegates` validation
-When Soroban host accepts authorization entries (`SorobanAuthorizationEntry`) it performs some basic validation on them (such as signatures being well-formed host values). For credentials of type `SorobanAddressCredentialsWithDelegates` the following additional validation rules are added:
+When Soroban host accepts authorization entries (`SorobanAuthorizationEntry`)
+it performs some basic validation on them (such as signatures being well-formed
+host values). For credentials of type `SorobanAddressCredentialsWithDelegates`
+the following additional validation rules are added:
-- Every `SorobanDelegateSignature` array (both top-level `delegates` field and every `nestedDelegates` field) must be sorted in the increasing order of the `address` field
+- Every `SorobanDelegateSignature` array (both top-level `delegates` field and
+ every `nestedDelegates` field) must be sorted in the increasing order of the
+ `address` field
- Every `SorobanDelegateSignature` array must contain no duplicate addresses
-If any of these conditions is violated, the host function invocation will fail immediately before entering the contract.
+If any of these conditions is violated, the host function invocation will fail
+immediately before entering the contract.
#### `delegate_account_auth` function
-A general mechanism for supporting authentication delegation is introduced, both for use to support the delegation for G-accounts, as well as for use in any custom (contract-based) accounts.
-
-`delegate_account_auth` host function is introduced by this CAP. It may only be called from the reserved `__check_auth` function, i.e. only when performing account authentication. All the addresses for which `delegate_account_auth` is called must be present in `SorobanAddressCredentialsWithDelegates` credentials in the `delegates` or `nestedDelegates` fields. Every such address will have `__check_auth` called for it with the same signature payload and context as for the top-level address the credentials belong to.
-
-More formally, the algorithm for handling `delegate_account_auth` is defined as follows:
-
-- Define 'top level' `__check_auth` call for non-source address `A` as a call that occurs when `require_auth[_for_args]` is matched to `A` for the first time, as per [CAP-46-11](./cap-0046-11.md).
- - Note, that implementation of `__check_auth` for G-accounts is built into host and is not observed as a contract call; however that doesn't affect the algorithm.
-- Define top level call arguments as `A.__check_auth(P, S(A), C)` where `P` is the 32-byte signature payload, `S(A)` is the signature from the `A`s auth entry, and `C` is the authorization context derived from the auth entry.
-- For every `__check_auth` call, define `current_delegate_list` as a list of delegated signer addresses with the corresponding signatures and a `used` flag.
-- For top level call, define `current_delegate_list` as the value of `delegates` inside `SorobanAddressCredentialsWithDelegates` in `A`s auth entry, or an empty list if `SorobanAddressCredentials` is used. Mark every delegate as unused by setting their `used` flag to `false`.
+A general mechanism for supporting authentication delegation is introduced,
+both for use to support the delegation for G-accounts, as well as for use in
+any custom (contract-based) accounts.
+
+`delegate_account_auth` host function is introduced by this CAP. It may only be
+called from the reserved `__check_auth` function, i.e. only when performing
+account authentication. All the addresses for which `delegate_account_auth` is
+called must be present in `SorobanAddressCredentialsWithDelegates` credentials
+in the `delegates` or `nestedDelegates` fields. Every such address will have
+`__check_auth` called for it with the same signature payload and context as for
+the top-level address the credentials belong to.
+
+More formally, the algorithm for handling `delegate_account_auth` is defined as
+follows:
+
+- Define 'top level' `__check_auth` call for non-source address `A` as a call
+ that occurs when `require_auth[_for_args]` is matched to `A` for the first
+ time, as per [CAP-46-11](./cap-0046-11.md).
+ - Note, that implementation of `__check_auth` for G-accounts is built into
+ host and is not observed as a contract call; however that doesn't affect
+ the algorithm.
+- Define top level call arguments as `A.__check_auth(P, S(A), C)` where `P` is
+ the 32-byte signature payload, `S(A)` is the signature from the `A`s auth
+ entry, and `C` is the authorization context derived from the auth entry.
+- For every `__check_auth` call, define `current_delegate_list` as a list of
+ delegated signer addresses with the corresponding signatures and a `used`
+ flag.
+- For top level call, define `current_delegate_list` as the value of
+ `delegates` inside `SorobanAddressCredentialsWithDelegates` in `A`s auth
+ entry, or an empty list if `SorobanAddressCredentials` is used. Mark every
+ delegate as unused by setting their `used` flag to `false`.
- Any time when `delegate_account_auth` is called for an address `B`:
- 1. Search for the first unused (`used == false`) occurrence of `B` in `current_delegate_list` and store the found index `i`. If there is no such occurrence, fail authentication process.
+ 1. Search for the first unused (`used == false`) occurrence of `B` in
+ `current_delegate_list` and store the found index `i`. If there is no such
+ occurrence, fail authentication process.
2. Set `current_delegate_list[i].used` to `true`.
3. Call `B.__check_auth(P, current_delegate_list[i].signature, C)`.
- - For the call, set `current_delegate_list` to the value of `current_delegate_list[i].nestedDelegates` and mark every delegate as unused.
+ - For the call, set `current_delegate_list` to the value of
+ `current_delegate_list[i].nestedDelegates` and mark every delegate as
+ unused.
#### `get_delegated_signers_for_current_auth_check` function
-`get_delegated_signers_for_current_auth_check` is a supplementary host function that simplifies the usage of `delegate_account_auth`. It returns the `current_delegate_list` populated as per the algorithm in the previous section as a vector of addresses.
+`get_delegated_signers_for_current_auth_check` is a supplementary host function
+that simplifies the usage of `delegate_account_auth`. It returns the
+`current_delegate_list` populated as per the algorithm in the previous section
+as a vector of addresses.
-This allows account contracts to have `Void`/empty signature when only the delegated signers are being used, which further reduces the transaction size and complexity, and simplifies the contract itself.
+This allows account contracts to have `Void`/empty signature when only the
+delegated signers are being used, which further reduces the transaction size
+and complexity, and simplifies the contract itself.
#### Signature payload for `SorobanAddressCredentialsWithDelegates`
-When `SorobanAddressCredentialsWithDelegates` type of credentials is used, the signature payload expected by the protocol is SHA-256 hash of `HashIDPreimage` XDR with `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_WITH_ADDRESS` variant. The contents of the payload preimage are equivalent to `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_WITH_ADDRESS`, as defined by [CAP-46-11](./cap-0046-11.md#soroban-authorization-signature-payload). The only difference is that `address` field must be filled in as well with the value of the top-level address to which the respective credentials belong.
+When `SorobanAddressCredentialsWithDelegates` type of credentials is used, the
+signature payload expected by the protocol is SHA-256 hash of `HashIDPreimage`
+XDR with `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_WITH_ADDRESS` variant. The
+contents of the payload preimage are equivalent to
+`ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_WITH_ADDRESS`, as defined by
+[CAP-46-11](./cap-0046-11.md#soroban-authorization-signature-payload). The only
+difference is that `address` field must be filled in as well with the value of
+the top-level address to which the respective credentials belong.
## Design Rationale
### New signature payload type
-While it would be easier to just re-use the existing payload, that would open up a potential vulnerability.
-
-Specifically, if an account uses only delegated signers, then signatures for a delegated signer could be replayed by using the delegated signer's address in place of the original account's address in the invocation. For example, if account `A` needs to sign a token transfer `token.transfer(A, to, 100)` via delegated signer `D` **and** token implementation only calls `A.require_auth_for_args((to, 100))`, then the signature of `D` would be reusable in `token.transfer(D, to, 100)` call. That would happen because `ENVELOPE_TYPE_SOROBAN_AUTHORIZATION` is not _explicitly_ attached to the account address, it's implicitly linked to it via cryptographic signature. Thus the issue would only happen for the contracts that don't explicitly require authorization for the address as an invocation argument.
-
-In order to prevent the vulnerability in all the cases, a new payload type is introduced that fixes the issue by explicitly adding the address to the payload and thus explicitly linking the nonce and signatures to the top-level account.
-
-Note, that in the 'legacy' delegation flow that currently exists in the protocol the same kind of linking is achieved due to the need for `D` to explicitly authorize `A.__check_auth` call (i.e. linking happens in `D`'s authorized invocation specification within the signed payload).
+While it would be easier to just re-use the existing payload, that would open
+up a potential vulnerability.
+
+Specifically, if an account uses only delegated signers, then signatures for a
+delegated signer could be replayed by using the delegated signer's address in
+place of the original account's address in the invocation. For example, if
+account `A` needs to sign a token transfer `token.transfer(A, to, 100)` via
+delegated signer `D` **and** token implementation only calls
+`A.require_auth_for_args((to, 100))`, then the signature of `D` would be
+reusable in `token.transfer(D, to, 100)` call. That would happen because
+`ENVELOPE_TYPE_SOROBAN_AUTHORIZATION` is not _explicitly_ attached to the
+account address, it's implicitly linked to it via cryptographic signature. Thus
+the issue would only happen for the contracts that don't explicitly require
+authorization for the address as an invocation argument.
+
+In order to prevent the vulnerability in all the cases, a new payload type is
+introduced that fixes the issue by explicitly adding the address to the payload
+and thus explicitly linking the nonce and signatures to the top-level account.
+
+Note, that in the 'legacy' delegation flow that currently exists in the
+protocol the same kind of linking is achieved due to the need for `D` to
+explicitly authorize `A.__check_auth` call (i.e. linking happens in `D`'s
+authorized invocation specification within the signed payload).
### Delegated signer recursive nesting
-The ability to nest the delegated signers makes protocol a bit more complex and requires 4 bytes of additional space in credentials even if it's not being used. This is introduced mostly for the sake of completeness and consistency, so that any accounts could be used as delegated signers (including ones that have delegated signers of their own). This may also potentially help some complex protocols that may come in the future. The relative protocol complexity increase is pretty marginal and thus should be tolerable.
+The ability to nest the delegated signers makes the protocol a bit more complex
+and requires 4 bytes of additional space in credentials even if it's not being
+used. This is introduced mostly for the sake of completeness and consistency,
+so that any accounts could be used as delegated signers (including ones that
+have delegated signers of their own). This may also potentially help some
+complex protocols that may come in the future. The relative protocol complexity
+increase is pretty marginal and thus should be tolerable.
### Delegated signer getter
-``get_delegated_signers_for_current_auth_check` function is technically optional for this CAP, as smart accounts could get away with defining a special signature type to account for the delegated signers.
+`get_delegated_signers_for_current_auth_check` function is technically
+optional for this CAP, as smart accounts could get away with defining a special
+signature type to account for the delegated signers.
- However, this creates some unnecessary duplication in the transaction data (as delegated signers would need to be specified twice), and introduces unnecessary fragmentation into how delegated signers work, as different contracts may use slightly different UDTs to represent them in signatures. This CAP already provides a standardized approach for storing the delegated signers in the transaction, so there is no good reason not to benefit from that on the contract implementation side as well, while also avoiding duplication.
+However, this creates some unnecessary duplication in the transaction data (as
+delegated signers would need to be specified twice), and introduces unnecessary
+fragmentation into how delegated signers work, as different contracts may use
+slightly different UDTs to represent them in signatures. This CAP already
+provides a standardized approach for storing the delegated signers in the
+transaction, so there is no good reason not to benefit from that on the
+contract implementation side as well, while also avoiding duplication.
## Protocol Upgrade Transition
-The proposed host function will use the standard mechanism for protocol-gating the host functions and will become available in protocol _TBD_. The new type of credentials will also only be available starting from protocol _TBD_, otherwise including it into transaction will invalidate the transaction.
+The proposed host function will use the standard mechanism for protocol-gating
+the host functions and will become available in protocol _TBD_. The new type of
+credentials will also only be available starting from protocol _TBD_, otherwise
+including it in a transaction will invalidate the transaction.
### Backwards Incompatibilities
@@ -238,13 +364,21 @@ This CAP does not introduce any backward incompatibilities.
### Resource Utilization
-The new host function will have the appropriate metering. No new cost types need to be introduced, as the operations can lean on the existing metering primitives.
+The new host function will have the appropriate metering. No new cost types
+need to be introduced, as the operations can lean on the existing metering
+primitives.
## Security Concerns
-Delegated signers increase the account implementation complexity and thus may increase the probability of it being vulnerable to exploits. However, this CAP doesn't significantly change the risk surface, as similar functionality already exists in the protocol.
+Delegated signers increase the account implementation complexity and thus may
+increase the probability of it being vulnerable to exploits. However, this CAP
+doesn't significantly change the risk surface, as similar functionality already
+exists in the protocol.
-Similarly to the existing authorization framework code, the new authorization-related code is very sensitive and must be thoroughly reviewed. Given the correct implementation there aren't significant new concerns from the protocol standpoint.
+Similarly to the existing authorization framework code, the new
+authorization-related code is very sensitive and must be thoroughly reviewed.
+Given the correct implementation there aren't significant new concerns from the
+protocol standpoint.
## Test Cases
@@ -256,17 +390,40 @@ TBD
## Appendix: Simulation flow
-While simulation is not a part of the protocol, it is an important part of the overall Stellar developer experience, so it's useful to mention it in order to put this CAP into context.
+While simulation is not a part of the protocol, it is an important part of the
+overall Stellar developer experience, so it's useful to mention it in order to
+put this CAP into context.
-Even with this CAP, two simulation runs are necessary in order to come up with the correct transaction footprint and resources. Note, that two simulation runs is a general issue that any custom account likely needs to deal with. There is a possibility to avoid the second simulation run via supplying all the signers and 'dummy' signatures for them, but that change is unrelated to this CAP.
+Even with this CAP, two simulation runs are necessary in order to come up with
+the correct transaction footprint and resources. Note that two simulation runs
+are a general issue that any custom account likely needs to deal with. There is
+a possibility to avoid the second simulation run via supplying all the signers
+and 'dummy' signatures for them, but that change is unrelated to this CAP.
-However, the process is still significantly simplified and streamlined. Specifically, the flow for any account `A` with any number of delegated signers will look like the following (for simplicity a call where only a single authorization entry needs to be signed is described, but this scales to an arbitrary number of payloads):
+However, the process is still significantly simplified and streamlined.
+Specifically, the flow for any account `A` with any number of delegated signers
+will look like the following (for simplicity a call where only a single
+authorization entry needs to be signed is described, but this scales to an
+arbitrary number of payloads):
1. Send a simulation request in the recording authorization mode
-2. Receive simulation response that contains the invocation part of the authorization entry for `A`
-3. Build `HashIDPreimage::ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_WITH_ADDRESS` (with `A`'s address, invocation tree, network ID, nonce, and signature expiration ledger as inputs) and compute its SHA-256 hash `H`.
-4. Compute the signature for `A`, depending on the account implementation (e.g. sign `H` with `A`s key, or just pass a `Void` value if only delegate signers are used) and attach it as a top-level signature in the output `SorobanAddressCredentialsWithDelegates`
-5. For every delegated signer `D`, sign `H` according to the implementation and attach `D`s address and signature to `SorobanAddressCredentialsWithDelegates`. In case of nested accounts, the signature must go to the appropriate node of the tree (the assumption is that wallet knows the intricacies of the account authentication scheme, which is necessary anyways to perform signing)
-6. Attach the final `SorobanAddressCredentialsWithDelegates` with all the signatures to `SorobanAuthorizationEntry` in the transaction and send it for another round of simulation in the enforcing mode
-7. Use the resources from the second simulation run for sending the final transaction to the network
-
+2. Receive simulation response that contains the invocation part of the
+ authorization entry for `A`
+3. Build `HashIDPreimage::ENVELOPE_TYPE_SOROBAN_AUTHORIZATION_WITH_ADDRESS`
+ (with `A`'s address, invocation tree, network ID, nonce, and signature
+ expiration ledger as inputs) and compute its SHA-256 hash `H`.
+4. Compute the signature for `A`, depending on the account implementation (e.g.
+ sign `H` with `A`s key, or just pass a `Void` value if only delegate signers
+ are used) and attach it as a top-level signature in the output
+ `SorobanAddressCredentialsWithDelegates`
+5. For every delegated signer `D`, sign `H` according to the implementation and
+ attach `D`s address and signature to
+ `SorobanAddressCredentialsWithDelegates`. In case of nested accounts, the
+ signature must go to the appropriate node of the tree (the assumption is
+ that wallet knows the intricacies of the account authentication scheme,
+ which is necessary anyways to perform signing)
+6. Attach the final `SorobanAddressCredentialsWithDelegates` with all the
+ signatures to `SorobanAuthorizationEntry` in the transaction and send it for
+ another round of simulation in the enforcing mode
+7. Use the resources from the second simulation run for sending the final
+ transaction to the network
diff --git a/core/cap-0072.md b/core/cap-0072.md
index eac850851..c39e82b6a 100644
--- a/core/cap-0072.md
+++ b/core/cap-0072.md
@@ -13,7 +13,8 @@ Protocol version: TBD
## Simple Summary
-Provide protocol tools for customizing authentication logic for the Stellar (G-) accounts via smart contracts.
+Provide protocol tools for customizing authentication logic for the Stellar
+(G-) accounts via smart contracts.
## Working Group
@@ -21,33 +22,73 @@ As specified in the Preamble.
## Motivation
-Contract (C-) accounts on Stellar are fully customizable, but wallets get access to more of the ecosystem today with a G-account.
+Contract (C-) accounts on Stellar are fully customizable, but wallets get
+access to more of the ecosystem today with a G-account.
-One type of functionality that some wallets want to adopt is passkeys, and specifically, the recovery processes available to passkeys on modern phones where they can be backed up in the cloud. However, to adopt passkeys is to adopt a contract account and step away from everything the ecosystem has only for G-accounts.
+One type of functionality that some wallets want to adopt is passkeys, and
+specifically, the recovery processes available to passkeys on modern phones
+where they can be backed up in the cloud. However, to adopt passkeys is to
+adopt a contract account and step away from everything the ecosystem has only
+for G-accounts.
-There is a wide range of possible solutions for bridging the gap between the C- and G-accounts, with different degree of complexity and technical challenges. This CAP aims at reducing the complexity by focusing on the specific issue of G-account customization for limited use cases and providing a solution that addresses this issue, but not necessarily more. For example, it's not a goal for this CAP to solve the issue of not being able to pay fees using contract-based authentication.
+There is a wide range of possible solutions for bridging the gap between the C-
+and G-accounts, with different degrees of complexity and technical challenges.
+This CAP aims at reducing the complexity by focusing on the specific issue of
+G-account customization for limited use cases and providing a solution that
+addresses this issue, but not necessarily more. For example, it's not a goal
+for this CAP to solve the issue of not being able to pay fees using
+contract-based authentication.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
-This CAP introduces a new kind of G-account signers, called 'delegated signers', that are usable only from within the smart contract environment, i.e. these signers can not be used to sign the transactions directly, but can be used to sign for `SorobanAuthorizationEntry`. Delegated signers are stored in the `AccountEntry` in the same fashion as any other G-account signer and they also can be managed via `SetOptionsOp`.
-
-Delegated signers have a signature weight, similarly to the existing ed25519 account signers. However, instead of performing the cryptographic verification, `delegate_account_auth` (introduced in [CAP-71](./cap-0071.md)) host function will be called for the delegated signer's address in order to perform authentication. Thus, if the delegated signer is a contract, then `__check_auth` will be called for it, which allows for the G-account authentication logic customization. In case if delegated signer is a G-account itself, the `__check_auth` implementation for G-accounts built into Soroban host will be called.
-
-Since delegated signers can only be used for authentication in the smart contract environment, in order to fulfill the requirement for being able to use customizable authentication for managing the account, every G-account is treated as a built-in smart contract with an interface that provides all the account management capabilities necessary for the recovery flows. Unlike the Stellar Asset contract, every account on chain will be implicitly instantiated as a contract, i.e. its address will just become callable without any additional actions required from the users.
-
- Modifications performed to delegated and regular (ed25519) signers via the G-account built-in contract require updating the account base reserves and removing the sponsorships on deletion. This sets a precedent for the classic base reserves being managed from the Soroban environment, including releasing the reserves from the sponsoring accounts on deletion. Using a new sponsorship itself won't be possible for the signers added via G-account contract, so a G-account *must* hold a sufficient XLM balance in order to add new signers via its contract.
+This CAP introduces a new kind of G-account signers, called 'delegated
+signers', that are usable only from within the smart contract environment, i.e.
+these signers can not be used to sign the transactions directly, but can be
+used to sign for `SorobanAuthorizationEntry`. Delegated signers are stored in
+the `AccountEntry` in the same fashion as any other G-account signer and they
+also can be managed via `SetOptionsOp`.
+
+Delegated signers have a signature weight, similarly to the existing ed25519
+account signers. However, instead of performing the cryptographic verification,
+`delegate_account_auth` (introduced in [CAP-71](./cap-0071.md)) host function
+will be called for the delegated signer's address in order to perform
+authentication. Thus, if the delegated signer is a contract, then
+`__check_auth` will be called for it, which allows for the G-account
+authentication logic customization. If the delegated signer is a G-account
+itself, the `__check_auth` implementation for G-accounts built into Soroban
+host will be called.
+
+Since delegated signers can only be used for authentication in the smart
+contract environment, in order to fulfill the requirement for being able to use
+customizable authentication for managing the account, every G-account is
+treated as a built-in smart contract with an interface that provides all the
+account management capabilities necessary for the recovery flows. Unlike the
+Stellar Asset contract, every account on chain will be implicitly instantiated
+as a contract, i.e. its address will just become callable without any
+additional actions required from the users.
+
+Modifications performed to delegated and regular (ed25519) signers via the
+G-account built-in contract require updating the account base reserves and
+removing the sponsorships on deletion. This sets a precedent for the classic
+base reserves being managed from the Soroban environment, including releasing
+the reserves from the sponsoring accounts on deletion. Using a new sponsorship
+itself won't be possible for the signers added via G-account contract, so a
+G-account _must_ hold a sufficient XLM balance in order to add new signers via
+its contract.
## Specification
### XDR changes
-This patch of XDR changes is based on the XDR files in commit `4b7a2ef7931ab2ca2499be68d849f38190b443ca` of stellar-xdr.
+This patch of XDR changes is based on the XDR files in commit
+`4b7a2ef7931ab2ca2499be68d849f38190b443ca` of stellar-xdr.
```diff mddiffcheck.ignore=true
diff --git a/Stellar-transaction.x b/Stellar-transaction.x
@@ -63,7 +104,7 @@ index 9a14d6e..0735db7 100644
+ // A signer with unsupported type was specified.
+ SET_OPTIONS_SIGNER_TYPE_NOT_SUPPORTED = -11
};
-
+
union SetOptionsResult switch (SetOptionsResultCode code)
@@ -1999,7 +2001,10 @@ enum TransactionResultCode
txBAD_SPONSORSHIP = -14, // sponsorship not confirmed
@@ -71,11 +112,11 @@ index 9a14d6e..0735db7 100644
txMALFORMED = -16, // precondition is invalid
- txSOROBAN_INVALID = -17 // soroban-specific preconditions were not met
+ txSOROBAN_INVALID = -17, // soroban-specific preconditions were not met
-+ // Transaction had an extra signer of type SIGNER_KEY_TYPE_SC_DELEGATED,
++ // Transaction had an extra signer of type SIGNER_KEY_TYPE_SC_DELEGATED,
+ // which is not supported.
-+ txACCOUNT_DELEGATED_SIGNER_NOT_SUPPORTED = -18
++ txACCOUNT_DELEGATED_SIGNER_NOT_SUPPORTED = -18
};
-
+
// InnerTransactionResult must be binary compatible with TransactionResult
diff --git a/Stellar-types.x b/Stellar-types.x
index f383d2e..ff1ed72 100644
@@ -89,7 +130,7 @@ index f383d2e..ff1ed72 100644
+ SIGNER_KEY_TYPE_ED25519_SIGNED_PAYLOAD = KEY_TYPE_ED25519_SIGNED_PAYLOAD,
+ SIGNER_KEY_TYPE_SC_DELEGATED = 4
};
-
+
union PublicKey switch (PublicKeyType type)
@@ -74,6 +75,8 @@ case SIGNER_KEY_TYPE_ED25519_SIGNED_PAYLOAD:
/* Payload to be raw signed by ed25519. */
@@ -98,28 +139,30 @@ index f383d2e..ff1ed72 100644
+case SIGNER_KEY_TYPE_SC_DELEGATED:
+ SCAddress delegatedSCSigner;
};
-
+
// variable size as the size depends on the signature scheme used
```
### Stellar(G-) account contract interface
-This CAP introduces a callable smart contract interface for the Stellar accounts. The following Rust trait specifies the interface as Soroban Rust SDK contract.
+This CAP introduces a callable smart contract interface for the Stellar
+accounts. The following Rust trait specifies the interface as Soroban Rust SDK
+contract.
```rust
pub trait StellarAccountInterface {
/// Adds a new ed25519 signer to the account with the given weight, or
/// updates the weight if the signer already exists.
- ///
+ ///
/// Weight must be in range (0, 255].
fn update_ed25519_signer(env: Env, key: BytesN<32>, weight: u32);
- /// Removes an ed25519 signer from the account.
+ /// Removes an ed25519 signer from the account.
fn remove_ed25519_signer(env: Env, key: BytesN<32>);
/// Adds a new delegated signer to the account with the given weight, or
/// updates the weight if the signer already exists.
- ///
+ ///
/// Weight must be in range (0, 255].
///
/// Instead of performing signature verification for the delegated signers,
@@ -131,80 +174,130 @@ pub trait StellarAccountInterface {
fn remove_delegated_signer(env: Env, signer: Address);
/// Sets the weight for the 'master' key of the account.
- ///
+ ///
/// The 'master' key is the public ed25519 key that identifies the account
/// itself. Setting the weight to 0 effectively removes the master key
/// from the account.
fn set_master_weight(env: Env, weight: u32);
/// Updates the signature thresholds for the account.
- ///
+ ///
/// `None` values leave the corresponding threshold unchanged.
- ///
+ ///
/// `low` threshold is only used for authorizing a few non-sensitive Stellar
// operations, such as bumping the account's sequence number.
- ///
- /// `med` threshold is used for authorizing most of the operations,
+ ///
+ /// `med` threshold is used for authorizing most of the operations,
/// including the Smart Contract interactions authorized via `require_auth`.
- ///
+ ///
/// `high` threshold is used for managing the account itself. Note, that
/// all the functions in this interface require high threshold, while still
/// being authorized via `require_auth`.
fn update_thresholds(env: Env, low: Option, med: Option, high: Option);
}
```
+
### Classic transaction semantics
-A new type of G-account signer `SIGNER_KEY_TYPE_SC_DELEGATED` is added. It is supported in most of the contexts where the `Signer` struct is used, such as the `SetOptionsOp`, which thus allows adding/removing/updating the delegated signers. Only delegated signers with types convertible to the Soroban `Address` are supported, i.e. those with types `SC_ADDRESS_TYPE_ACCOUNT` and `SC_ADDRESS_TYPE_CONTRACT`. Otherwise, the operation will fail with `SET_OPTIONS_SIGNER_TYPE_NOT_SUPPORTED` error.
+A new type of G-account signer `SIGNER_KEY_TYPE_SC_DELEGATED` is added. It is
+supported in most of the contexts where the `Signer` struct is used, such as
+the `SetOptionsOp`, which thus allows adding/removing/updating the delegated
+signers. Only delegated signers with types convertible to the Soroban `Address`
+are supported, i.e. those with types `SC_ADDRESS_TYPE_ACCOUNT` and
+`SC_ADDRESS_TYPE_CONTRACT`. Otherwise, the operation will fail with
+`SET_OPTIONS_SIGNER_TYPE_NOT_SUPPORTED` error.
-The only context where `SIGNER_KEY_TYPE_SC_DELEGATED` is explicitly not supported is the `extraSigners` transaction precondition, which allows users specifying the `SignerKey` that must sign a transaction. If `SignerKey` is of type `SIGNER_KEY_TYPE_SC_DELEGATED`, then the transaction will be considered not valid with the `txACCOUNT_DELEGATED_SIGNER_NOT_SUPPORTED` error.
+The only context where `SIGNER_KEY_TYPE_SC_DELEGATED` is explicitly not
+supported is the `extraSigners` transaction precondition, which allows users
+specifying the `SignerKey` that must sign a transaction. If `SignerKey` is of
+type `SIGNER_KEY_TYPE_SC_DELEGATED`, then the transaction will be considered
+not valid with the `txACCOUNT_DELEGATED_SIGNER_NOT_SUPPORTED` error.
-Delegated signers are ignored during the transaction signature verification and they can't even be logically matched to the transaction signature hint due to `SCAddress` payload.
+Delegated signers are ignored during the transaction signature verification and
+they can't even be logically matched to the transaction signature hint due to
+`SCAddress` payload.
### Smart contract semantics
#### G-account authentication
-The algorithm for verifying detached (non-`SOURCE_ACCOUNT`) smart contract authorization payload in Soroban host is updated to enable delegated account support. The authentication is updated in the following way:
-
-- `get_delegated_signers_for_current_auth_check` host function (defined in CAP-71) is used to retrieve all the delegated signers corresponding to the authentication
-- If the total number of signatures and delegated signers is `0` or exceeds `MAX_ACCOUNT_SIGNATURES` (20), fail
-- If the delegated signers are not sorted in the ascending order, or contain duplicates, fail
+The algorithm for verifying detached (non-`SOURCE_ACCOUNT`) smart contract
+authorization payload in Soroban host is updated to enable delegated account
+support. The authentication is updated in the following way:
+
+- `get_delegated_signers_for_current_auth_check` host function (defined in
+ CAP-71) is used to retrieve all the delegated signers corresponding to the
+ authentication
+- If the total number of signatures and delegated signers is `0` or exceeds
+ `MAX_ACCOUNT_SIGNATURES` (20), fail
+- If the delegated signers are not sorted in the ascending order, or contain
+ duplicates, fail
- If any delegated signer is not stored in the `AccountEntry`, fail
-- Call `delegate_account_auth` for every delegated signer, and fail if the call fails
+- Call `delegate_account_auth` for every delegated signer, and fail if the call
+ fails
- Add the weight of every delegated signer to the signature weight
-- Process the regular signatures passed with the authorization entries according to [CAP-46-11](./cap-0046-11.md#stellar-account-authentication) and add their weights to the total weight
-- Compare the total signature weight to the required threshold (`MEDIUM` or `HIGH`, see details below)
+- Process the regular signatures passed with the authorization entries
+ according to [CAP-46-11](./cap-0046-11.md#stellar-account-authentication) and
+ add their weights to the total weight
+- Compare the total signature weight to the required threshold (`MEDIUM` or
+ `HIGH`, see details below)
-An additional change is made in order to use the proper signature threshold for the account management. Currently, G-account authentication rules in Soroban use `MEDIUM` threshold when authenticating an account for any Soroban operation, i.e. the built-in G-account contract ignores the authorization context completely.
+An additional change is made in order to use the proper signature threshold for
+the account management. Currently, G-account authentication rules in Soroban
+use `MEDIUM` threshold when authenticating an account for any Soroban
+operation, i.e. the built-in G-account contract ignores the authorization
+context completely.
-With this CAP, if any contract call on a G-account is present in authorization context, then the threshold requirement will be raised to `HIGH`.
+With this CAP, if any contract call on a G-account is present in authorization
+context, then the threshold requirement will be raised to `HIGH`.
#### G-account contract (GAC)
-Every G-account on-chain gets an implicit contract 'instance' which is just represented by the account entry itself. The contract will be called GAC further for simplicity.
+Every G-account on-chain gets an implicit contract 'instance' which is just
+represented by the account entry itself. The contract will be called GAC
+further for simplicity.
-When a contract call is performed on a G-address, the implementation of GAC built into host will handle the call. This is similar to the Stellar Asset contract handling (SAC), with the only difference being that a non-contract address is being used for routing the calls.
+When a contract call is performed on a G-address, the implementation of GAC
+built into host will handle the call. This is similar to the Stellar Asset
+contract handling (SAC), with the only difference being that a non-contract
+address is being used for routing the calls.
#### `AccountEntry` updates from Soroban host
-All GAC operations have to update the `AccountEntry` that belongs to the corresponding G-account. Updates to the signers require following the base reserve semantics in the same fashion as for the `SetOptions` operation.
+All GAC operations have to update the `AccountEntry` that belongs to the
+corresponding G-account. Updates to the signers require following the base
+reserve semantics in the same fashion as for the `SetOptions` operation.
-When a signer is added to the `AccountEntry`, the number of account sub-entries is increased, so the base reserve has to go up. If the account does not have sufficient balance for increasing the base reserve, then the function call will fail. Sponsorship is not supported for the Soroban operations in general, so there is no way to sponsor the base reserve instead.
+When a signer is added to the `AccountEntry`, the number of account sub-entries
+is increased, so the base reserve has to go up. If the account does not have
+sufficient balance for increasing the base reserve, then the function call will
+fail. Sponsorship is not supported for the Soroban operations in general, so
+there is no way to sponsor the base reserve instead.
-When a signer is removed from the `AccountEntry`, it might be sponsored. If there is no sponsorship, then the sub-entries count is just reduced for the entry. If there is a sponsorship, then information is updated accordingly in both the affected account and its sponsor, i.e. base reserve is returned to the sponsor. This update does not require additional authorization from the sponsor, so it can be performed by the Soroban Host by just changing 2 account entries accordingly.
+When a signer is removed from the `AccountEntry`, it might be sponsored. If
+there is no sponsorship, then the sub-entries count is just reduced for the
+entry. If there is a sponsorship, then information is updated accordingly in
+both the affected account and its sponsor, i.e. base reserve is returned to the
+sponsor. This update does not require additional authorization from the
+sponsor, so it can be performed by the Soroban Host by just changing 2 account
+entries accordingly.
##### GAC functions
-Every GAC function calls `require_auth` for the corresponding G-account. Authentication procedure will require `HIGH` signature threshold, as per G-account authentication semantics described in the authentication [section](#g-account-authentication).
+Every GAC function calls `require_auth` for the corresponding G-account.
+Authentication procedure will require `HIGH` signature threshold, as per
+G-account authentication semantics described in the authentication
+[section](#g-account-authentication).
The following sections describe semantics of all the GAC functions.
###### `update_ed25519_signer`
-Adds a new ed25519 signer with the provided 32-byte public key to the account. If the signer already exists, updates its weight instead.
+Adds a new ed25519 signer with the provided 32-byte public key to the account.
+If the signer already exists, updates its weight instead.
-Fails if a new signer is being added and an account already has `MAX_SIGNERS` (20) signers.
+Fails if a new signer is being added and an account already has `MAX_SIGNERS`
+(20) signers.
###### `remove_ed25519_signer`
@@ -214,9 +307,11 @@ Fails if the signer does not exist.
###### `update_delegated_signer`
-Adds a new delegated signer with the provided `Address` and the provided weight. If the signer already exists, updates the weight instead.
+Adds a new delegated signer with the provided `Address` and the provided
+weight. If the signer already exists, updates the weight instead.
-Fails if a new signer is being added and an account already has `MAX_SIGNERS` (20) signers.
+Fails if a new signer is being added and an account already has `MAX_SIGNERS`
+(20) signers.
###### `remove_delegated_signer`
@@ -226,39 +321,85 @@ Fails if the signer does not exist.
###### `set_master_weight`
-Sets the weight of the 'master' key, i.e. the public key that identifies the account.
+Sets the weight of the 'master' key, i.e. the public key that identifies the
+account.
###### `update_thresholds`
-Updates the signature thresholds of the account when the corresponding arguments are set for the low/medium/high thresholds.
+Updates the signature thresholds of the account when the corresponding
+arguments are set for the low/medium/high thresholds.
## Design Rationale
### Implicit account contracts
-There is no need to explicitly created a contract instance for the G-accounts. This reduces the complexity compared to the Stellar Asset contract and reduces the necessary ledger state size. While this approach means that there is no way to opt out from the account being accessible via GAC, the account implementation has the same authorization requirements as the existing account management operation (`SetOptionsOp`), so there is no additional risk surface or cost induced by the implicit GAC instances.
+There is no need to explicitly create a contract instance for the G-accounts.
+This reduces the complexity compared to the Stellar Asset contract and reduces
+the necessary ledger state size. While this approach means that there is no way
+to opt out from the account being accessible via GAC, the account
+implementation has the same authorization requirements as the existing account
+management operation (`SetOptionsOp`), so there is no additional risk surface
+or cost induced by the implicit GAC instances.
### Account base reserve management from Soroban
-Until now, Soroban used to only modify a few fields in the 'classic' entries, which haven't required making any changes to the account base reserves and sponsorships. This CAP introduces modifications that require updating the base reserves and sponsorships from Soroban.
-
-This has a somewhat non-intuitive consequence of Soroban authorization being used to modify XLM balance of the account for the fee purposes, which hasn't been the case before. However, the behavior is consistent with the classic protocol itself, so in the end it's unlikely to cause significant confusion for the users.
-
-There is also no full feature parity with the classic protocol, as using the sponsorships is not possible in Soroban, and thus the specific account that entries are being added to has to hold XLM balance.
-
-These issues could be avoided by moving the new signers to a separate contract data entry that is subject to the State Archival. The sponsorships removals would still need to be performed on the Soroban side (unless we forfeit `remove_ed25519_signer` function). This approach allows anyone to pay fee for creating the new entries. However, the downside of the contract data approach as it requires much more Soroban semantics support in the 'classic' part of protocol vs the amount of the 'classic' support in Soroban proposed in this CAP. State archival would affect every operation that works with the account signers (including the transaction validation). Since it's not cheap to tell if an entry is archived, it is not possible to provide a clear error to the users in case if the entry is archived without introducing the DOS risks. The change scope is just generally larger and thus more risky, and wallets would have a harder time adapting to the extended account structure.
+Until now, Soroban used to only modify a few fields in the 'classic' entries,
+which haven't required making any changes to the account base reserves and
+sponsorships. This CAP introduces modifications that require updating the base
+reserves and sponsorships from Soroban.
+
+This has a somewhat non-intuitive consequence of Soroban authorization being
+used to modify XLM balance of the account for the fee purposes, which hasn't
+been the case before. However, the behavior is consistent with the classic
+protocol itself, so in the end it's unlikely to cause significant confusion for
+the users.
+
+There is also no full feature parity with the classic protocol, as using the
+sponsorships is not possible in Soroban, and thus the specific account that
+entries are being added to has to hold XLM balance.
+
+These issues could be avoided by moving the new signers to a separate contract
+data entry that is subject to the State Archival. The sponsorships removals
+would still need to be performed on the Soroban side (unless we forfeit
+`remove_ed25519_signer` function). This approach allows anyone to pay fee for
+creating the new entries. However, the downside of the contract data approach
+is that it requires much more Soroban semantics support in the 'classic' part of
+protocol vs the amount of the 'classic' support in Soroban proposed in this
+CAP. State archival would affect every operation that works with the account
+signers (including the transaction validation). Since it's not cheap to tell if
+an entry is archived, it is not possible to provide a clear error to the users
+if the entry is archived without introducing the DOS risks. The change
+scope is just generally larger and thus more risky, and wallets would have a
+harder time adapting to the extended account structure.
### CAP-71 dependency
-This CAP benefits from the authentication mechanism introduced in [CAP-71](./cap-0071.md). It allows using a single authorization entry for an account with delegated signers and simplifies the simulation and overall developer experience.
+This CAP benefits from the authentication mechanism introduced in
+[CAP-71](./cap-0071.md). It allows using a single authorization entry for an
+account with delegated signers and simplifies the simulation and overall
+developer experience.
-This CAP could technically be implemented without CAP-71 by using the existing delegation approach (described in CAP-71 [motivation](./cap-0071.md#motivation) section). However, that would come with a much worse developer experience, and is also hard to change going forward. That's why CAP-71 is a pre-requisite for this CAP.
+This CAP could technically be implemented without CAP-71 by using the existing
+delegation approach (described in CAP-71 [motivation](./cap-0071.md#motivation)
+section). However, that would come with a much worse developer experience, and
+is also hard to change going forward. That's why CAP-71 is a pre-requisite for
+this CAP.
## Security Concerns
-A new way to modify G-account settings is introduced. As usual for the sensitive authorization-related code, the main risk lies in the implementation correctness, thus it has to be reviewed and tested thoroughly. However, the design itself does not introduce any new risks from the protocol standpoint: the new account interface has the same high signature threshold as the `SetOptions` operation and only allows performing modifications that are a subset of `SetOptions`.
+A new way to modify G-account settings is introduced. As usual for the
+sensitive authorization-related code, the main risk lies in the implementation
+correctness, thus it has to be reviewed and tested thoroughly. However, the
+design itself does not introduce any new risks from the protocol standpoint:
+the new account interface has the same high signature threshold as the
+`SetOptions` operation and only allows performing modifications that are a
+subset of `SetOptions`.
-Adding delegated, contract-based signers comes with a risk for a user: every contract signer added to the account has to be a trusted and audited contract. This risk exists for any contract-based account in general. The users don't have to add contract signers to the account - if they don't do that, they are not subject to any new risks.
+Adding delegated, contract-based signers comes with a risk for a user: every
+contract signer added to the account has to be a trusted and audited contract.
+This risk exists for any contract-based account in general. The users don't
+have to add contract signers to the account - if they don't do that, they are
+not subject to any new risks.
## Test Cases
@@ -266,4 +407,4 @@ TBD
## Implementation
-TBD
\ No newline at end of file
+TBD
diff --git a/core/cap-0073.md b/core/cap-0073.md
index 03d980903..e651ac566 100644
--- a/core/cap-0073.md
+++ b/core/cap-0073.md
@@ -13,7 +13,8 @@ Protocol version: TBD
## Simple Summary
-Add Stellar Account contract (SAC) functions for managing trustlines of Stellar (G-)accounts and create new G-accounts on XLM transfer.
+Add Stellar Account contract (SAC) functions for managing trustlines of Stellar
+(G-)accounts and create new G-accounts on XLM transfer.
## Working Group
@@ -21,26 +22,49 @@ As specified in the Preamble.
## Motivation
-Stellar Asset contract allows moving the Stellar assets between any accounts (both classic G-accounts and custom C-accounts), but Soroban doesn't provide a way to create a new G-account balance. This behavior is not consistent with the contract (C-)accounts, for which the balances are created automatically on the first transfer.
-
-The feature imparity is tolerable to some degree, because a G-account can create a trustline via a classic operation, and a new G-account itself can be created via a separate operation. While the workaround provides a way for using SAC for G-accounts after some 'classic'-side setup, it comes with some issues, such as:
-
-- It's not possible to create an trustline and perform a SAC transfer to it atomically as classic operations can't be mixed with Soroban operations
-- It's tricky for Soroban-based apps to provide programmatic support for the missing trustlines, as bespoke processing is necessary. The 'simplest' option is to bubble up the error and ask the user to establish a trustline, and even that is not trivial to support
-
-By allowing the trustline and account creation from within Soroban the protocol will resolve these feature gaps and improve the user and developer experience for any use case that needs to deal with SAC.
+Stellar Asset contract allows moving the Stellar assets between any accounts
+(both classic G-accounts and custom C-accounts), but Soroban doesn't provide a
+way to create a new G-account balance. This behavior is not consistent with the
+contract (C-)accounts, for which the balances are created automatically on the
+first transfer.
+
+The feature disparity is tolerable to some degree, because a G-account can
+create a trustline via a classic operation, and a new G-account itself can be
+created via a separate operation. While the workaround provides a way for using
+SAC for G-accounts after some 'classic'-side setup, it comes with some issues,
+such as:
+
+- It's not possible to create a trustline and perform a SAC transfer to it
+ atomically as classic operations can't be mixed with Soroban operations
+- It's tricky for Soroban-based apps to provide programmatic support for the
+ missing trustlines, as bespoke processing is necessary. The 'simplest' option
+ is to bubble up the error and ask the user to establish a trustline, and even
+ that is not trivial to support
+
+By allowing the trustline and account creation from within Soroban the protocol
+will resolve these feature gaps and improve the user and developer experience
+for any use case that needs to deal with SAC.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
-This CAP introduces a new Stellar Asset contract function `trust` for creating trustlines. The `trust` function creates an unlimited trustline for a G-account if it does not already exist. The trustline creation semantics are the same as for the existing `ChangeTrustOp`. Account authorization is required for actually creating the trustline, but not for calling `trust`.
+This CAP introduces a new Stellar Asset contract function `trust` for creating
+trustlines. The `trust` function creates an unlimited trustline for a G-account
+if it does not already exist. The trustline creation semantics are the same as
+for the existing `ChangeTrustOp`. Account authorization is required for
+actually creating the trustline, but not for calling `trust`.
-This also updates the `transfer` and `transfer_from` functions specifically for the 'native' asset (i.e. for the XLM SAC). When an XLM transfer is coming to a G-address that doesn't have a corresponding account entry yet, the account will be created as long as the transfer amount covers the minimum allowed account balance.
+This also updates the `transfer` and `transfer_from` functions specifically for
+the 'native' asset (i.e. for the XLM SAC). When an XLM transfer is coming to a
+G-address that doesn't have a corresponding account entry yet, the account will
+be created as long as the transfer amount covers the minimum allowed account
+balance.
## Specification
@@ -54,7 +78,7 @@ The following new functions are added to the Stellar Asset contract:
/// This is no-op if the input address is a C-address, or if provided G-address
/// already has the respective trustline.
///
-/// If the trustline is actually created, this will require
+/// If the trustline is actually created, this will require
/// authorization from `address` (i.e. `address.require_auth` will be called).
///
/// Panics only during trustline creation if the asset issuer does not exist, or
@@ -66,58 +90,110 @@ fn trust(env: Env, address: Address);
#### `trust` function
-When `trust` SAC function creates a new trustline, it follows the semantics of `ChangeTrustOp` with the respective input arguments. There are only a few Soroban-specified adjustments:
-
-- The asset for which the trustline is managed is implied by the SAC instance for which `trust` is called
- - Thus, unlike for `ChangeTrustOp`, only the regular `Asset` trustlines can be created, as liquidity pool shares don't have a SAC associated with them
-- There is no fine control over the limits, a new trustline with limit `i64::MAX` is created if no trustline exists for a given address. Limit is not modified otherwise
+When `trust` SAC function creates a new trustline, it follows the semantics of
+`ChangeTrustOp` with the respective input arguments. There are only a few
+Soroban-specific adjustments:
+
+- The asset for which the trustline is managed is implied by the SAC instance
+ for which `trust` is called
+ - Thus, unlike for `ChangeTrustOp`, only the regular `Asset` trustlines can
+ be created, as liquidity pool shares don't have a SAC associated with them
+- There is no fine control over the limits, a new trustline with limit
+ `i64::MAX` is created if no trustline exists for a given address. Limit is
+ not modified otherwise
- Input `address` can be a C-address, but passing it is always a no-op
-- `require_auth` is called to perform `address` authorization for creating a trustline, which means that all the Soroban auth features are available
-- Sponsorship is not supported for creating a new trustline, so the address must have sufficient XLM balance in order to be able to create a new trustline
+- `require_auth` is called to perform `address` authorization for creating a
+ trustline, which means that all the Soroban auth features are available
+- Sponsorship is not supported for creating a new trustline, so the address
+ must have sufficient XLM balance in order to be able to create a new
+ trustline
+Besides these differences, the trustline creation semantics are directly
+translated from `ChangeTrustOp`. Here is a quick summary:
-Besides these differences, the trustline creation semantics are directly translated from `ChangeTrustOp`. Here is a quick summary:
- If the asset issuer does not exist, the function panics
- If trustline does not exist, it is created with `i64::MAX` limit
- - This increases the number of sub-entries (and thus base reserve) of the account that owns the trustline and thus is subject to the existing protocol limitations for creating the classic sub-entries
- - Sponsorship is not compatible with Soroban and thus base reserve may only belong to the trustline owner
-- Creating a trustline to account's own asset will result in panic
-
+ - This increases the number of sub-entries (and thus base reserve) of the
+ account that owns the trustline and thus is subject to the existing
+ protocol limitations for creating the classic sub-entries
+ - Sponsorship is not compatible with Soroban and thus base reserve may only
+ belong to the trustline owner
+- Creating a trustline to account's own asset will result in panic
#### XLM transfer creates `AccountEntry`
-When an XLM transfer is performed to a G-address that does not exist yet (i.e. it does not have a respective `AccountEntry`), Soroban host will attempt to create a new `AccountEntry` for it. Specifically, it will create a new `AccountEntry` with the full transfer amount as balance if the transfer amount is at least the minimum account balance (1 XLM as of this CAP). Otherwise, it will panic.
-If the transfer destination is M-address, its G-address part will be considered to perform the logic described above.
+When an XLM transfer is performed to a G-address that does not exist yet (i.e.
+it does not have a respective `AccountEntry`), Soroban host will attempt to
+create a new `AccountEntry` for it. Specifically, it will create a new
+`AccountEntry` with the full transfer amount as balance if the transfer amount
+is at least the minimum account balance (1 XLM as of this CAP). Otherwise, it
+will panic. If the transfer destination is an M-address, its G-address part
+will be considered to perform the logic described above.
-This change affects both SAC functions that perform transfers: `transfer` and `transfer_from`.
+This change affects both SAC functions that perform transfers: `transfer` and
+`transfer_from`.
-The newly created account will immediately become available to interact with, including using it to perform authorization via `require_auth`.
+The newly created account will immediately become available to interact with,
+including using it to perform authorization via `require_auth`.
## Design Rationale
### Account base reserve management from Soroban
-This CAP introduces a way to create and remove the classic trustline entries from Soroban. Most of the design coincide with those for the G-account signer management in [CAP-72](./cap-0072.md#account-base-reserve-management-from-soroban). Signers could in theory be stored in a contract data entry and that would require a relatively small amount of changes. However, if Soroban created trustlines only as contract data entries, then almost every classic operation would need to be modified to account for these entries. Thus, that alternative is even less feasible than in the case of CAP-72.
+This CAP introduces a way to create and remove the classic trustline entries
+from Soroban. Most of the design coincides with that for the G-account signer
+management in
+[CAP-72](./cap-0072.md#account-base-reserve-management-from-soroban). Signers
+could in theory be stored in a contract data entry and that would require a
+relatively small amount of changes. However, if Soroban created trustlines only
+as contract data entries, then almost every classic operation would need to be
+modified to account for these entries. Thus, that alternative is even less
+feasible than in the case of CAP-72.
### `trust` function vs automatic trustline creation
-Transfers to accounts without a trustline could initiate trustline creation in a similar fashion to the account entry creation in case of XLM SAC. However, that would likely result in confusing user experience: an additional signature from the transfer _receiver_ would be required (in order to approve the trustline creation). It's not generally expected for the transfer to expect the receiver signature, and that scenario would also happen very rarely in a general case, so there is a high chance for developers to miss it.
+Transfers to accounts without a trustline could initiate trustline creation in
+a similar fashion to the account entry creation in case of XLM SAC. However,
+that would likely result in confusing user experience: an additional signature
+from the transfer _receiver_ would be required (in order to approve the
+trustline creation). It's not generally expected for the transfer to require the
+receiver signature, and that scenario would also happen very rarely in a
+general case, so there is a high chance for developers to miss it.
-`trust` function allows contract developers to explicitly request trustline creation from the users. That requires a conscious effort to support the trustline creation behavior, which reduces the chance of the additional required signature payload to go unnoticed.
+`trust` function allows contract developers to explicitly request trustline
+creation from the users. That requires a conscious effort to support the
+trustline creation behavior, which reduces the chance of the additional
+required signature payload to go unnoticed.
### `trust` function granularity
-Over the last year over 98% of the active trustlines had limit over 1e18 (effectively unlimited, 95+% of trustlines are also at int64 max), so unlimited trust is going to be sufficient most of the time. For the remaining small amount of cases, it is not likely that there is a good generalized approach to managing the exact trustline limit on the contract side, so it's not obvious that a more granular function would actually end up being useful.
+Over the last year over 98% of the active trustlines had a limit over 1e18
+(effectively unlimited, 95+% of trustlines are also at int64 max), so unlimited
+trust is going to be sufficient most of the time. For the remaining small
+amount of cases, it is not likely that there is a good generalized approach to
+managing the exact trustline limit on the contract side, so it's not obvious
+that a more granular function would actually end up being useful.
### No trustline creation events
-There is no protocol-defined event for the trustline creation and thus `trust` function will not emit any events.
+There is no protocol-defined event for the trustline creation and thus `trust`
+function will not emit any events.
### Muxed account support
-Since `transfer` function has support for muxed destinations, we need to take that into account for the purpose of this CAP. We create an underlying G-account in order to keep the semantics consistent for all kind of transfers (muxed id part normally has no impact on the on-chain contract logic beyond the events). The transfer will then proceed to emit a regular transfer event with `to_muxed_id` populated according to the existing semantics.
+Since `transfer` function has support for muxed destinations, we need to take
+that into account for the purpose of this CAP. We create an underlying
+G-account in order to keep the semantics consistent for all kinds of transfers
+(muxed id part normally has no impact on the on-chain contract logic beyond the
+events). The transfer will then proceed to emit a regular transfer event with
+`to_muxed_id` populated according to the existing semantics.
-In case of the new `trust` function, muxed addresses are not supported. Even though trustline creation may be necessary to be performed before the actual transfer, there is no reason to pass a muxed address into the creation function itself. There is no event to be emitted that would contain the muxed id, and that would be generally inconsistent with the SAC interface that only supports muxed destination specifically for transfers.
+In case of the new `trust` function, muxed addresses are not supported. Even
+though trustline creation may be necessary to be performed before the actual
+transfer, there is no reason to pass a muxed address into the creation function
+itself. There is no event to be emitted that would contain the muxed id, and
+that would be generally inconsistent with the SAC interface that only supports
+muxed destination specifically for transfers.
### Backwards Incompatibilities
@@ -125,7 +201,9 @@ This CAP does not introduce backward incompatibilities.
### Resource Utilization
-The new contract functions will have the appropriate metering. No new cost types need to be introduced, as the operations can lean on the existing metering primitives.
+The new contract functions will have the appropriate metering. No new cost
+types need to be introduced, as the operations can lean on the existing
+metering primitives.
## Security Concerns
@@ -136,4 +214,3 @@ TBD
## Implementation
TBD
-
diff --git a/core/cap-0074.md b/core/cap-0074.md
index 4dcbefd65..1ecd44bcd 100644
--- a/core/cap-0074.md
+++ b/core/cap-0074.md
@@ -14,27 +14,37 @@ Protocol version: 24
```
## Simple Summary
-BN254 is a pairing-friendly elliptic curve that has wide support in the ecosystem, and CAP proposes a set of new host functions to provide native support for this curve.
+
+BN254 is a pairing-friendly elliptic curve that has wide support in the
+ecosystem, and this CAP proposes a set of new host functions to provide native
+support for this curve.
## Working Group
As described in the preamble section.
## Motivation
-While Stellar has native support for BLS12-381, many existing applications still rely on BN254 because it's the only pairing friendly elliptic curve with native support in the EVM. Adding BN254 support will allow those applications to add support for Soroban without the need to use a different curve.
+
+While Stellar has native support for BLS12-381, many existing applications
+still rely on BN254 because it's the only pairing friendly elliptic curve with
+native support in the EVM. Adding BN254 support will allow those applications
+to add support for Soroban without the need to use a different curve.
### Goals Alignment
+
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects
- to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
+
Three new host functions are proposed for performing curve operations on BN254.
## Specification
### New host functions
+
```
{
"export": "m",
@@ -90,6 +100,7 @@ Three new host functions are proposed for performing curve operations on BN254.
```
### XDR changes
+
```
diff --git a/Stellar-contract-config-setting.x b/Stellar-contract-config-setting.x
index b075c6b4f..36abfeb7c 100644
@@ -123,7 +134,7 @@ index b075c6b4f..36abfeb7c 100644
+ // Cost of converting a BN254 scalar element from U256
+ Bn254FrFromU256 = 79
};
-
+
struct ContractCostParamEntry {
```
@@ -131,105 +142,202 @@ index b075c6b4f..36abfeb7c 100644
#### Field and groups
-`fp` - field element in the base field. Encoding rule: big-endian encoding of the underlying unsigned 32-byte integer. `fp` cannot be larger than the field modulus `0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47`.
+`fp` - field element in the base field. Encoding rule: big-endian encoding of
+the underlying unsigned 32-byte integer. `fp` cannot be larger than the field
+modulus `0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47`.
-`fp2`- field element in the quadratic extension of the base prime field. Encoding rule: concatenation of the two encoded-components `c1` and `c0` i.e. `be_encode(c1) || be_encode(c0)`
+`fp2` - field element in the quadratic extension of the base prime field.
+Encoding rule: concatenation of the two encoded-components `c1` and `c0` i.e.
+`be_encode(c1) || be_encode(c0)`
-`fp12` - field element in the 12-degree prime extension field. This is the output from the pairing operation. `fp12` is only used as the intermediary and encoding is not needed.
+`fp12` - field element in the 12-degree prime extension field. This is the
+output from the pairing operation. `fp12` is only used as the intermediary and
+encoding is not needed.
-`fr` - scalar. A scalar has maximum length of 32 bytes. `fr` is represented with an `U256Val`, and it can be any number between `0` and `2^{256}-1`.
+`fr` - scalar. A scalar has maximum length of 32 bytes. `fr` is represented
+with an `U256Val`, and it can be any number between `0` and `2^{256}-1`.
-`G1` - group containing points over the base prime field that satisfy the curve equation, plus point at infinity. Encoding rule: concatenation of the two encoded-coordinates (uncompressed form), each being an `fp`, i.e. `be_encode(X) || be_encode(Y)`. The point at infinity is encoded as `(0,0)`. No flag bits are reserved, i.e. the highest two bits in the first byte (`0x80` and `0x40`) must be unset.
+`G1` - group containing points over the base prime field that satisfy the curve
+equation, plus point at infinity. Encoding rule: concatenation of the two
+encoded-coordinates (uncompressed form), each being an `fp`, i.e.
+`be_encode(X) || be_encode(Y)`. The point at infinity is encoded as `(0,0)`. No
+flag bits are reserved, i.e. the highest two bits in the first byte (`0x80` and
+`0x40`) must be unset.
-`G2` - group containing points over the quadratic extension of the base prime field that satisfy the curve equation, plus the point at infinity. Encoding rule: concatenation of the two encoded-coordinates (uncompressed form), each following `fp2` encoding rule, i.e. `be_encode(X_c1) || be_encode(X_c0) || be_encode(Y_c1) || be_encode(Y_c0)`. The point at infinity is encoded as `(0,0)`. No flag bits are reserved, i.e. the highest two bits in the first byte (`0x80` and `0x40`) must be unset.
+`G2` - group containing points over the quadratic extension of the base prime
+field that satisfy the curve equation, plus the point at infinity. Encoding
+rule: concatenation of the two encoded-coordinates (uncompressed form), each
+following `fp2` encoding rule, i.e.
+`be_encode(X_c1) || be_encode(X_c0) || be_encode(Y_c1) || be_encode(Y_c0)`. The
+point at infinity is encoded as `(0,0)`. No flag bits are reserved, i.e. the
+highest two bits in the first byte (`0x80` and `0x40`) must be unset.
#### New host functions introduced
##### `bn254_g1_add`
-**Description**: perform point addition in G1.
+**Description**: perform point addition in G1.
-**Cost**: covers the cost of decoding (`Bn254DecodeFp`) and on curve check (`Bn254G1CheckPointOnCurve`) of G1 points, point addition (`Bn254G1Add`), conversion of from projective to affine space (`Bn254G1ProjectiveToAffine`), and encoding the result to bytes `Bn254EncodeFp`.
+**Cost**: covers the cost of decoding (`Bn254DecodeFp`) and on curve check
+(`Bn254G1CheckPointOnCurve`) of G1 points, point addition (`Bn254G1Add`),
+conversion from projective to affine space (`Bn254G1ProjectiveToAffine`),
+and encoding the result to bytes `Bn254EncodeFp`.
+
+**Error condition**: if the input bytes contained in each `BytesObject` do not
+decode into valid G1 points or do not conform to the specified encoding standard.
-**Error condition**: if the input bytes contained in each `BytesObject` do not decode into valid G1 points or do not conform the specified encoding standard.
- Bytes length is not equal to 64
- The point is compressed.
- Either input point does not belong on the G1 curve.
##### `bn254_g1_mul`
-**Description** perform scalar multiplication in G1.
-**Cost**: includes decoding G1 point, converting `fr` from `U256` (`Bn254FrFromU256`), point multiplication `Bn254G1Mul`, converting the point from project to affine and encoding the result into bytes.
+**Description** perform scalar multiplication in G1.
+
+**Cost**: includes decoding G1 point, converting `fr` from `U256`
+(`Bn254FrFromU256`), point multiplication `Bn254G1Mul`, converting the point
+from projective to affine and encoding the result into bytes.
+
+**Error condition**: if the input `BytesObject` does not decode into a valid G1
+point or does not conform to the specified encoding standard.
-**Error condition**: if the input `BytesObject` does not decode into a valid G1 points or does not conform the specified encoding standard.
- Bytes length is not equal to 64.
- The point is compressed.
- The input point does not belong on the G1 curve.
##### `bn254_multi_pairing_check`
-**Description**: performs pairing operation on a vector of `G1` and a vector of `G2` points, returns `true` if the result equals `1_fp12`, otherwise returns `false`.
-**Cost**: includes deserialization of the point vectors (in G1 and G2 respectively), cost of performing the pairing operation `Bn254Pairing`.
+**Description**: performs pairing operation on a vector of `G1` and a vector of
+`G2` points, returns `true` if the result equals `1_fp12`, otherwise returns
+`false`.
+
+**Cost**: includes deserialization of the point vectors (in G1 and G2
+respectively), cost of performing the pairing operation `Bn254Pairing`.
+
+**Error conditions**:
-**Error conditions**:
1. two input vectors have different length
2. either input vector has zero length
-3. any element in the G1 vector does not decode into a valid G1 point or does not conform the specified encoding standard.
+3. any element in the G1 vector does not decode into a valid G1 point or does
+   not conform to the specified encoding standard.
+
- Bytes length is not equal to 64
- The point is compressed.
- Either input point does not belong on the G1 curve.
-4. any element in the G2 vector does not decode into a valid G2 point or does not conform the specified encoding standard.
+
+4. any element in the G2 vector does not decode into a valid G2 point or does
+   not conform to the specified encoding standard.
+
- Bytes length is not equal to 128
- The point is compressed.
- Either input point does not belong on the G2 curve.
- Either input point does not belong to the correct subgroup.
#### New metering `CostType`s introduced
-- `Bn254EncodeFp` - Cost of encoding a BN254 Fp (base field element). Encoding includes the necessary conversion from the internal representation into integer form, and serialization into bytes. Type: constant.
-- `Bn254DecodeFp` - Cost of decoding a BN254 Fp (base field element). Decoding includes deserialization from bytes into integer, and the necessary conversion from the integer form to the internal representation. Type: constant.
-- `Bn254G1CheckPointOnCurve` - Cost of validating that a G1 point lies on the curve. Type: constant.
-- `Bn254G2CheckPointOnCurve` - Cost of validating that a G2 point lies on the curve. Type: constant.
-- `Bn254G2CheckPointInSubgroup` - Cost of validating that a G2 point belongs to the correct subgroup. Type: constant.
-- `Bn254G1ProjectiveToAffine` - Cost of converting a BN254 G1 point from projective to affine coordinates. Type: constant.
+
+- `Bn254EncodeFp` - Cost of encoding a BN254 Fp (base field element). Encoding
+ includes the necessary conversion from the internal representation into
+ integer form, and serialization into bytes. Type: constant.
+- `Bn254DecodeFp` - Cost of decoding a BN254 Fp (base field element). Decoding
+ includes deserialization from bytes into integer, and the necessary
+ conversion from the integer form to the internal representation. Type:
+ constant.
+- `Bn254G1CheckPointOnCurve` - Cost of validating that a G1 point lies on the
+ curve. Type: constant.
+- `Bn254G2CheckPointOnCurve` - Cost of validating that a G2 point lies on the
+ curve. Type: constant.
+- `Bn254G2CheckPointInSubgroup` - Cost of validating that a G2 point belongs to
+ the correct subgroup. Type: constant.
+- `Bn254G1ProjectiveToAffine` - Cost of converting a BN254 G1 point from
+ projective to affine coordinates. Type: constant.
- `Bn254G1Add` - Cost of performing BN254 G1 point addition. Type: constant.
-- `Bn254G1Mul` - Cost of performing BN254 G1 scalar multiplication. Type: constant.
-- `Bn254Pairing` - Cost of performing BN254 pairing operation. Type: linear w.r.t to the length of the input vectors.
-- `Bn254FrFromU256` - Cost of converting a BN254 scalar element from U256. This includes necessary conversion from the integer form to the internal representation. Type: constant.
+- `Bn254G1Mul` - Cost of performing BN254 G1 scalar multiplication. Type:
+ constant.
+- `Bn254Pairing` - Cost of performing BN254 pairing operation. Type: linear
+  w.r.t. the length of the input vectors.
+- `Bn254FrFromU256` - Cost of converting a BN254 scalar element from U256. This
+ includes necessary conversion from the integer form to the internal
+ representation. Type: constant.
## Design Rationale
### Function list choice
-The list of host functions give Stellar parity with Ethereum. Specifically, we're adding support for the precompiles specified in [EIP-196](https://eips.ethereum.org/EIPS/eip-196) and [EIP-197](https://eips.ethereum.org/EIPS/eip-197).
+
+The list of host functions gives Stellar parity with Ethereum. Specifically,
+we're adding support for the precompiles specified in
+[EIP-196](https://eips.ethereum.org/EIPS/eip-196) and
+[EIP-197](https://eips.ethereum.org/EIPS/eip-197).
#### `U256Val` for scalar
-All field and group elements mentioned in the host functions are represented as `BytesObject`, with encoding rule specified in [fields and groups](#field-and-groups), except for the scalar element. Since the scalar can be up to 32-bytes, with the same semantics as an unsigned integer, using `U256Val` is the natural choice. Extracting bytes in the correct format can be tricky and error prone, depending on the underlying implementation. Using `U256Val` also takes advantage of the internal small value optimization which reduces storage space for small numbers, see [value type repertoire](cap-0046-01.md/#rationale-for-value-and-object-type-repertoires).
+
+All field and group elements mentioned in the host functions are represented as
+`BytesObject`, with encoding rule specified in
+[fields and groups](#field-and-groups), except for the scalar element. Since
+the scalar can be up to 32-bytes, with the same semantics as an unsigned
+integer, using `U256Val` is the natural choice. Extracting bytes in the correct
+format can be tricky and error prone, depending on the underlying
+implementation. Using `U256Val` also takes advantage of the internal small
+value optimization which reduces storage space for small numbers, see
+[value type repertoire](cap-0046-01.md/#rationale-for-value-and-object-type-repertoires).
#### Encode/Decode
-The only two cost types representing encoding/decoding are of the base field element `Bn254EncodeFp` and `Bn254DecodeFp`, since all field and group elements can be composed of the base elements. (En)Decoding `G1` is (en)decoding two `fp` separately, same for `fp2`. `G2` contains two `fp2`, that are (en)decoded separately.
+
+The only two cost types representing encoding/decoding are of the base field
+element `Bn254EncodeFp` and `Bn254DecodeFp`, since all field and group elements
+can be composed of the base elements. (En)Decoding `G1` is (en)decoding two
+`fp` separately, same for `fp2`. `G2` contains two `fp2`, that are (en)decoded
+separately.
#### No G1 subgroup check
-If the point is on the G1 curve, then it also belongs to the G1 subgroup (https://hackmd.io/@jpw/bn254#Subgroup-check-for-mathbb-G_1), so there's no need for a G1 subgroup check. You can also see that the
-[arkworks](https://github.com/arkworks-rs/algebra/blob/c6f9284c17df00c50d954a5fe1c72dd4a5698103/curves/bn254/src/curves/g1.rs#L61) library always returns true for the subgroup check.
+
+If the point is on the G1 curve, then it also belongs to the G1 subgroup
+(https://hackmd.io/@jpw/bn254#Subgroup-check-for-mathbb-G_1), so there's no
+need for a G1 subgroup check. You can also see that the
+[arkworks](https://github.com/arkworks-rs/algebra/blob/c6f9284c17df00c50d954a5fe1c72dd4a5698103/curves/bn254/src/curves/g1.rs#L61)
+library always returns true for the subgroup check.
## Protocol Upgrade Transition
-The proposed host functions will become available protocol 24, i.e. with `"min_supported_protocol": 24` in the interface definition. For `protocol_version <= 23`, attempting to import any of these function definitions in the WASM will lead to a linking error during Vm instantiation time.
+
+The proposed host functions will become available in protocol 24, i.e. with
+`"min_supported_protocol": 24` in the interface definition. For
+`protocol_version <= 23`, attempting to import any of these function
+definitions in the WASM will lead to a linking error during Vm instantiation
+time.
### Backwards Incompatibilities
+
This CAP does not introduce any backward incompatibilities.
### Resource Utilization
-The performance impact of the new host functions have been captured by the new `CostType` described above. The cpu and memory consumption need to be calibrated carefully on each new `CostType` to ensure that the cost of running BN host functions are metered properly and subject to the network limits. Final calibration numbers are TBD.
+
+The performance impact of the new host functions has been captured by the new
+`CostType` described above. The cpu and memory consumption need to be
+calibrated carefully on each new `CostType` to ensure that the cost of running
+BN host functions are metered properly and subject to the network limits. Final
+calibration numbers are TBD.
## Security Concerns
+
The main security concerns include
-- Logic correctness. The proposed set of functions cover a wide range of cryptographic operations, which rely on correctness of 3rd party implementations. Incorrect implementation or failure to cover certain corner case potentially be exploitable vulnerabilities.
-- Denial of service. Since the proposed operations are computationally intensive, failure to properly calibrate any part, or to properly account for an extra-expensive path, could lead to the actual computation time significantly exceeding the metered costs, thus potentially lead to denial of service.
+
+- Logic correctness. The proposed set of functions cover a wide range of
+ cryptographic operations, which rely on correctness of 3rd party
+ implementations. Incorrect implementation or failure to cover certain corner
+  cases could potentially be exploitable vulnerabilities.
+- Denial of service. Since the proposed operations are computationally
+ intensive, failure to properly calibrate any part, or to properly account for
+ an extra-expensive path, could lead to the actual computation time
+  significantly exceeding the metered costs, thus potentially leading to denial
+  of service.
- Curve security - the curve no longer offers 128-bit security.
## Test Cases
-1. Added tests from the ethereum-go repo [here](https://github.com/stellar/rs-soroban-env/blob/cf58d535ab05d02802a5e804a95524650f8c62c7/soroban-env-host/src/test/bn254.rs#L993).
-2. Added test from the evm precompile docs [here](https://github.com/stellar/rs-soroban-env/blob/cf58d535ab05d02802a5e804a95524650f8c62c7/soroban-env-host/src/test/bn254.rs#L772C1-L772C42).
+
+1. Added tests from the ethereum-go repo
+ [here](https://github.com/stellar/rs-soroban-env/blob/cf58d535ab05d02802a5e804a95524650f8c62c7/soroban-env-host/src/test/bn254.rs#L993).
+2. Added test from the evm precompile docs
+ [here](https://github.com/stellar/rs-soroban-env/blob/cf58d535ab05d02802a5e804a95524650f8c62c7/soroban-env-host/src/test/bn254.rs#L772C1-L772C42).
## Implementation
-TODO
\ No newline at end of file
+TODO
diff --git a/core/cap-0075.md b/core/cap-0075.md
index 5426eae4b..67a4b31ee 100644
--- a/core/cap-0075.md
+++ b/core/cap-0075.md
@@ -15,7 +15,8 @@ Protocol version: 24
## Simple Summary
-This CAP proposes host functions for Poseidon and Poseidon2 permutation primitives.
+This CAP proposes host functions for Poseidon and Poseidon2 permutation
+primitives.
## Working Group
@@ -23,25 +24,38 @@ TBD
## Motivation
-Poseidon is a family of hash functions designed specifically for efficient zero-knowledge proof systems, due to its operations being native to the prime fields. Even though proofs are generated off-chain (and proof verification does not require rehashing), having consistent hash functions in the contract logic with the prove-system choice is crucial. Supporting Poseidon as host functions in Soroban can facilitate adoption of ZK applications and interoperability with other ecosystems.
-
+Poseidon is a family of hash functions designed specifically for efficient
+zero-knowledge proof systems, due to its operations being native to the prime
+fields. Even though proofs are generated off-chain (and proof verification does
+not require rehashing), having consistent hash functions in the contract logic
+with the proof-system choice is crucial. Supporting Poseidon as host functions
+in Soroban can facilitate adoption of ZK applications and interoperability with
+other ecosystems.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
-- The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
-- The Stellar Network should facilitate simplicity and interoperability with other protocols and networks
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
+- The Stellar Network should facilitate simplicity and interoperability with
+ other protocols and networks
## Abstract
-This CAP adds two host functions implementing the Poseidon and Poseidon2 permutation primitives. These primitives operate on field elements from BLS12-381 Fr or BN254 Fr scalar fields. The permutation functions accept configurable parameters, allowing developers to construct hash functions tailored to specific use cases, thus maintaining interoperability with other ZK systems.
+This CAP adds two host functions implementing the Poseidon and Poseidon2
+permutation primitives. These primitives operate on field elements from
+BLS12-381 Fr or BN254 Fr scalar fields. The permutation functions accept
+configurable parameters, allowing developers to construct hash functions
+tailored to specific use cases, thus maintaining interoperability with other ZK
+systems.
## Specification
### New host functions
-Two new functions with export names `p` and `q` in the crypto module (`c`) are added to the Soroban environment's exported interface.
+Two new functions with export names `p` and `q` in the crypto module (`c`) are
+added to the Soroban environment's exported interface.
```json
{
@@ -110,48 +124,70 @@ diff --git a/Stellar-contract-config-setting.x b/Stellar-contract-config-setting
#### Field Selection
The `field` parameter specifies the scalar field:
-- `0`: BLS12-381 Fr (scalar field order r = 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001)
-- `1`: BN254 Fr (scalar field order r = 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001)
-Scalar field elements are represented as `U256Val` objects as specified in CAP-0059.
+- `0`: BLS12-381 Fr (scalar field order r =
+ 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001)
+- `1`: BN254 Fr (scalar field order r =
+ 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001)
+
+Scalar field elements are represented as `U256Val` objects as specified in
+CAP-0059.
#### Permutation Parameters
-Both Poseidon and Poseidon2 are based on the HADES design strategy, which alternates between full rounds and partial rounds:
+Both Poseidon and Poseidon2 are based on the HADES design strategy, which
+alternates between full rounds and partial rounds:
-- `t`: State size. The internal state contains `t` field elements. Common values are 2, 3, 4, or 5 depending on the use case (e.g., binary tree hashing uses t=3).
+- `t`: State size. The internal state contains `t` field elements. Common
+ values are 2, 3, 4, or 5 depending on the use case (e.g., binary tree hashing
+ uses t=3).
-- `d`: S-box degree. The S-box is defined as S(x) = x^d. For BLS12-381 and BN254, d=5 is standard. Poseidon2 supports additional S-box degrees (3, 7, 11).
+- `d`: S-box degree. The S-box is defined as S(x) = x^d. For BLS12-381 and
+ BN254, d=5 is standard. Poseidon2 supports additional S-box degrees (3, 7,
+ 11).
-- `rounds_f`: Number of full rounds. Must be even. In full rounds, the S-box is applied to all `t` state elements.
+- `rounds_f`: Number of full rounds. Must be even. In full rounds, the S-box is
+ applied to all `t` state elements.
-- `rounds_p`: Number of partial rounds. In partial rounds, the S-box is applied to only one state element.
+- `rounds_p`: Number of partial rounds. In partial rounds, the S-box is applied
+ to only one state element.
The total number of rounds is `rounds_f + rounds_p`.
#### Matrix Parameters
-**Poseidon** uses an MDS (Maximum Distance Separable) matrix for the linear layer:
+**Poseidon** uses an MDS (Maximum Distance Separable) matrix for the linear
+layer:
-- `mds`: A `t`-by-`t` MDS matrix represented as `Vec>`. The matrix is typically a Cauchy matrix. During each round, the state vector is multiplied by this matrix.
+- `mds`: A `t`-by-`t` MDS matrix represented as `Vec>`. The matrix
+ is typically a Cauchy matrix. During each round, the state vector is
+ multiplied by this matrix.
**Poseidon2** uses an optimized internal linear layer:
-- `mat_internal_diag_m_1`: Internal matrix diagonal minus 1, represented as `Vec` of length `t`. Poseidon2 replaces the MDS matrix in partial rounds with a more efficient construction that reduces the number of field multiplications.
+- `mat_internal_diag_m_1`: Internal matrix diagonal minus 1, represented as
+ `Vec` of length `t`. Poseidon2 replaces the MDS matrix in partial
+ rounds with a more efficient construction that reduces the number of field
+ multiplications.
#### Round Constants
-- `round_constants`: A matrix of size `(rounds_f + rounds_p)` by `t`, represented as `Vec>`. Each round adds a different constant to each state element.
+- `round_constants`: A matrix of size `(rounds_f + rounds_p)` by `t`,
+ represented as `Vec>`. Each round adds a different constant to
+ each state element.
#### Input and Output
-- `input`: Vector of `t` field elements representing the initial state. Elements are `U256Val` objects.
+- `input`: Vector of `t` field elements representing the initial state.
+ Elements are `U256Val` objects.
-- Return value: Vector of `t` field elements representing the output state after applying the permutation.
+- Return value: Vector of `t` field elements representing the output state
+ after applying the permutation.
#### Error Conditions
The host function will trap if:
+
- `input` vector length does not equal `t`
- `mds` matrix is not `t` by `t` (Poseidon only)
- `mat_internal_diag_m_1` vector length does not equal `t` (Poseidon2 only)
@@ -160,30 +196,52 @@ The host function will trap if:
#### Cost Metering
-The new cost types `Bn254FrFromU256`, `Bn254FrToU256`, `Bn254FrAddSub`, `Bn254FrMul`, `Bn254FrPow`, and `Bn254FrInv` meter BN254 scalar field operations. For BLS12-381 operations, existing cost types from CAP-0059 are used.
+The new cost types `Bn254FrFromU256`, `Bn254FrToU256`, `Bn254FrAddSub`,
+`Bn254FrMul`, `Bn254FrPow`, and `Bn254FrInv` meter BN254 scalar field
+operations. For BLS12-381 operations, existing cost types from CAP-0059 are
+used.
-The permutation cost is dominated by field multiplications. The cost scales linearly with the number of rounds and quadratically with the state size `t` due to matrix operations.
+The permutation cost is dominated by field multiplications. The cost scales
+linearly with the number of rounds and quadratically with the state size `t`
+due to matrix operations.
## Design Rationale
### Permutation Primitives vs. Hash Functions
-Poseidon is a family of hash functions built using the sponge construction. The sponge construction consists of two phases:
+Poseidon is a family of hash functions built using the sponge construction. The
+sponge construction consists of two phases:
-1. **Absorb**: Input data is absorbed into the state, followed by applying the permutation
+1. **Absorb**: Input data is absorbed into the state, followed by applying the
+ permutation
2. **Squeeze**: Output is extracted from the state after final permutation
-The permutation function $\pi$ is the core cryptographic primitive that provides the one-way and collision-resistant properties.
+The permutation function $\pi$ is the core cryptographic primitive that
+provides the one-way and collision-resistant properties.
-This CAP exposes the permutation primitives rather than complete hash functions for the following reasons:
+This CAP exposes the permutation primitives rather than complete hash functions
+for the following reasons:
-**Flexibility**: Different applications require different configurations. For Merkle tree hashing in a binary tree, the rate r=2 and capacity c=1 (state size t=3). For Merkle trees with higher arity or different use cases, different parameters are required. By exposing the permutation, developers can construct hash functions with arbitrary input sizes and security parameters.
+**Flexibility**: Different applications require different configurations. For
+Merkle tree hashing in a binary tree, the rate r=2 and capacity c=1 (state size
+t=3). For Merkle trees with higher arity or different use cases, different
+parameters are required. By exposing the permutation, developers can construct
+hash functions with arbitrary input sizes and security parameters.
-**Interoperability**: Different ZK systems use different parameter sets generated according to their security requirements. Exposing the permutation allows Soroban contracts to maintain compatibility with external systems without requiring multiple specialized hash function variants.
+**Interoperability**: Different ZK systems use different parameter sets
+generated according to their security requirements. Exposing the permutation
+allows Soroban contracts to maintain compatibility with external systems
+without requiring multiple specialized hash function variants.
-**Reduced maintenance surface**: The permutation contains all cryptographic complexity. The sponge construction is a simple state machine that can be efficiently implemented in the SDK guest code. This minimizes the host function surface area that requires cryptographic review.
+**Reduced maintenance surface**: The permutation contains all cryptographic
+complexity. The sponge construction is a simple state machine that can be
+efficiently implemented in the SDK guest code. This minimizes the host function
+surface area that requires cryptographic review.
-**Composability**: The Poseidon papers describe multiple constructions beyond basic hashing, including compression functions and variable-length hashing modes. Permutation primitives enable all these constructions without requiring additional host functions.
+**Composability**: The Poseidon papers describe multiple constructions beyond
+basic hashing, including compression functions and variable-length hashing
+modes. Permutation primitives enable all these constructions without requiring
+additional host functions.
Example SDK usage for a 2-to-1 hash (pseudo-code):
@@ -215,41 +273,68 @@ pub fn poseidon2_hash_two(input1: Fr, input2: Fr) -> Fr {
### Parameter Selection
-Parameter selection (state size, rounds, matrices, constants) must follow the security analysis in the Poseidon and Poseidon2 papers. The original papers provide reference scripts for generating secure parameter sets. Users are responsible for selecting parameters appropriate to their security level and use case.
+Parameter selection (state size, rounds, matrices, constants) must follow the
+security analysis in the Poseidon and Poseidon2 papers. The original papers
+provide reference scripts for generating secure parameter sets. Users are
+responsible for selecting parameters appropriate to their security level and
+use case.
-Common parameter sets have been published by the Poseidon authors and adopted by various ZK systems. Developers should use established parameter sets when interoperability with existing systems is required.
+Common parameter sets have been published by the Poseidon authors and adopted
+by various ZK systems. Developers should use established parameter sets when
+interoperability with existing systems is required.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-This CAP does not introduce backward incompatibilities. The new host functions are only available in protocol version 24 and later.
+This CAP does not introduce backward incompatibilities. The new host functions
+are only available in protocol version 24 and later.
## Security Concerns
### Cryptographic Correctness
-The security of Poseidon relies on the correctness of the permutation implementation. Implementation must be carefully vetted and verified against official test vectors. Any deviation from the specification could compromise the desired hash properties.
+The security of Poseidon relies on the correctness of the permutation
+implementation. Implementation must be carefully vetted and verified against
+official test vectors. Any deviation from the specification could compromise
+the desired hash properties.
### Parameter Validation
-The host function performs minimal parameter validation (vector lengths, field bounds). It does not validate whether the provided parameters (rounds, matrices, constants) constitute a secure configuration. Users are responsible for selecting cryptographically sound parameters.
+The host function performs minimal parameter validation (vector lengths, field
+bounds). It does not validate whether the provided parameters (rounds,
+matrices, constants) constitute a secure configuration. Users are responsible
+for selecting cryptographically sound parameters.
-Insecure parameter selection could result in weakened hash functions vulnerable to collision attacks or preimage attacks. Users should use parameter sets generated according to the methodologies described in the Poseidon and Poseidon2 papers or use widely-adopted standard parameter sets. To mitigate such risk, we will provide presets of securely generated parameters used in production in existing production proof systems (e.g. circom, Noir).
+Insecure parameter selection could result in weakened hash functions vulnerable
+to collision attacks or preimage attacks. Users should use parameter sets
+generated according to the methodologies described in the Poseidon and
+Poseidon2 papers or use widely-adopted standard parameter sets. To mitigate
+such risk, we will provide presets of securely generated parameters used in
+production in existing production proof systems (e.g. circom, Noir).
### Denial of Service
-The permutation cost scales with the number of rounds and state size. Improper metering could allow attackers to consume excessive resources. The metering implementation must accurately measure the cost of all field operations within the permutation.
+The permutation cost scales with the number of rounds and state size. Improper
+metering could allow attackers to consume excessive resources. The metering
+implementation must accurately measure the cost of all field operations within
+the permutation.
-The host function does not impose upper bounds on parameter values (beyond vector length consistency). Resource limits are enforced through the standard Soroban metering framework as specified in CAP-0046-10.
+The host function does not impose upper bounds on parameter values (beyond
+vector length consistency). Resource limits are enforced through the standard
+Soroban metering framework as specified in CAP-0046-10.
## Test Cases
Test vectors will be sourced from:
-1. **HorizenLabs Poseidon2 implementation** (https://github.com/HorizenLabs/poseidon2): Provides reference test vectors for Poseidon2 with multiple field types and parameter configurations.
+1. **HorizenLabs Poseidon2 implementation**
+ (https://github.com/HorizenLabs/poseidon2): Provides reference test vectors
+ for Poseidon2 with multiple field types and parameter configurations.
-2. **Original Poseidon implementation** (https://extgit.isec.tugraz.at/krypto/hadeshash/): Reference implementation from the original Poseidon paper authors.
+2. **Original Poseidon implementation**
+ (https://extgit.isec.tugraz.at/krypto/hadeshash/): Reference implementation
+ from the original Poseidon paper authors.
## Implementation
diff --git a/core/cap-0076.md b/core/cap-0076.md
index 57569b892..d47f5fc12 100644
--- a/core/cap-0076.md
+++ b/core/cap-0076.md
@@ -13,7 +13,9 @@ Protocol version: 24
## Simple Summary
-Fix the entries that have been archived in corrupted state due to a bug in protocol 23 *and have never had corruption observed*. Also update the fee pool to reflect the unintentional XLM burns.
+Fix the entries that have been archived in corrupted state due to a bug in
+protocol 23 _and have never had corruption observed_. Also update the fee pool
+to reflect the unintentional XLM burns.
## Working Group
@@ -21,70 +23,153 @@ As specified in the Preamble.
## Motivation
-Protocol 23 has introduced a mechanism for evicting the persistent contract data and code entries into Hot Archive (see [CAP-62](./cap-0062.md) for details). However, due to an implementation bug, at the moment of the archival (i.e. when the entry gets removed from the live bucket list and is moved to the Hot Archive) an arbitrary historical state has been used instead of the most recent state. That means that some entries have been archived with a state that they had at some point at time, but not the most recent state.
-
-Consider the following example that illustrates how the bug manifests. Imagine a contract `C` that has a balance of 10 XLM. `C` then proceeds to spend 9 XLM and is left with 1 XLM balance. Then `C` doesn't perform any more XLM operations for a while and its balance entry gets archived. Due to a bug, the entry state where it had 10 XLM balance could get archived. If that was the case, then when someone would restore `C` balance, they would get back 10 XLM balance, thus effectively minting 9 XLM. Note, that until the entry has been restored its state is not observable on-chain and thus the 9 XLM mint is only meaningful from the moment of restoration.
-
-The actual bug impact on Stellar Mainnet has been that 478 ledger entries have been archived with an incorrect state before validators disabled the state eviction at the protocol entries. Luckily, 394 entries out of these have never been restored (at least at the moment of writing this CAP; however, the restoration has been disabled at the overlay level for these entries and it's highly unlikely that any more restorations would have happened until protocol 24 upgrade).
-
-The 394 entries that have never been restored are hashed into ledger as a part of the Hot Archive. However, as mentioned above, archived entries don't have any other observable on-chain impact (i.e. it's not possible for any transaction to read or modify their value). Because of that it is possible to amend the state of the corrupted entries to the correct value without risking to break any on-chain logic or invariants.
-
-Amending the corrupted entries would be beneficial to the network as it avoids all sorts of issues that would occur if entries were restored in the corrupted state (such as unexpected token mints or burns, or protocols assuming invalid state). While it is a rare and unusual precedent for validators to make any changes to the on-chain state they don't own (i.e. anything beyond the network configuration itself), this particular kind of amendment has a very limited and verifiable scope. It is possible for any observer to ingest the correct (i.e. pre-archival) state for every amended entry from the history and then verify that it matches the state of the entry after protocol 24 upgrade. It is also possible to verify that only the affected entries get amended via replaying the history and examining the Hot Archive hash after the upgrade. These factors make it not possible for validators to maliciously update the entries that haven't been corrupted, or updating the corrupted entries to incorrect values *without that being visible for any external observer*.
-
-Note, that this CAP does not allow amendment of any state that has actually been observed due to restoration. The only change that concerns restored corrupted entries is an amendment of the fee pool that is done just in order to reflect the total XLM balance on the network.
+Protocol 23 has introduced a mechanism for evicting the persistent contract
+data and code entries into Hot Archive (see [CAP-62](./cap-0062.md) for
+details). However, due to an implementation bug, at the moment of the archival
+(i.e. when the entry gets removed from the live bucket list and is moved to the
+Hot Archive) an arbitrary historical state has been used instead of the most
+recent state. That means that some entries have been archived with a state that
+they had at some point at time, but not the most recent state.
+
+Consider the following example that illustrates how the bug manifests. Imagine
+a contract `C` that has a balance of 10 XLM. `C` then proceeds to spend 9 XLM
+and is left with 1 XLM balance. Then `C` doesn't perform any more XLM
+operations for a while and its balance entry gets archived. Due to a bug, the
+entry state where it had 10 XLM balance could get archived. If that was the
+case, then when someone would restore `C` balance, they would get back 10 XLM
+balance, thus effectively minting 9 XLM. Note, that until the entry has been
+restored its state is not observable on-chain and thus the 9 XLM mint is only
+meaningful from the moment of restoration.
+
+The actual bug impact on Stellar Mainnet has been that 478 ledger entries have
+been archived with an incorrect state before validators disabled the state
+eviction at the protocol entries. Luckily, 394 entries out of these have never
+been restored (at least at the moment of writing this CAP; however, the
+restoration has been disabled at the overlay level for these entries and it's
+highly unlikely that any more restorations would have happened until protocol
+24 upgrade).
+
+The 394 entries that have never been restored are hashed into ledger as a part
+of the Hot Archive. However, as mentioned above, archived entries don't have
+any other observable on-chain impact (i.e. it's not possible for any
+transaction to read or modify their value). Because of that it is possible to
+amend the state of the corrupted entries to the correct value without risking
+to break any on-chain logic or invariants.
+
+Amending the corrupted entries would be beneficial to the network as it avoids
+all sorts of issues that would occur if entries were restored in the corrupted
+state (such as unexpected token mints or burns, or protocols assuming invalid
+state). While it is a rare and unusual precedent for validators to make any
+changes to the on-chain state they don't own (i.e. anything beyond the network
+configuration itself), this particular kind of amendment has a very limited and
+verifiable scope. It is possible for any observer to ingest the correct (i.e.
+pre-archival) state for every amended entry from the history and then verify
+that it matches the state of the entry after protocol 24 upgrade. It is also
+possible to verify that only the affected entries get amended via replaying the
+history and examining the Hot Archive hash after the upgrade. These factors
+make it not possible for validators to maliciously update the entries that
+haven't been corrupted, or updating the corrupted entries to incorrect values
+_without that being visible for any external observer_.
+
+Note, that this CAP does not allow amendment of any state that has actually
+been observed due to restoration. The only change that concerns restored
+corrupted entries is an amendment of the fee pool that is done just in order to
+reflect the total XLM balance on the network.
### Goals Alignment
-This CAP is aligned with the goal of maintaining the Stellar network reliability as it effectively fixes the data corruption bug for a large number of affected ledger entries and brings them to the expected state.
+This CAP is aligned with the goal of maintaining the Stellar network
+reliability as it effectively fixes the data corruption bug for a large number
+of affected ledger entries and brings them to the expected state.
## Abstract
-Two changes will be performed: amendment of the corrupted Hot Archive entries that will bring them back to the correct state they should have had during archival, and amendment of the fee pool.
+Two changes will be performed: amendment of the corrupted Hot Archive entries
+that will bring them back to the correct state they should have had during
+archival, and amendment of the fee pool.
-The entry amendment will only happen for the entries that are still in the Hot Archive and have never been restored or updated. The full list of the affected entries is attached to the CAP.
+The entry amendment will only happen for the entries that are still in the Hot
+Archive and have never been restored or updated. The full list of the affected
+entries is attached to the CAP.
-The fee pool is amended by adding `31879035` stroops in order to account for the XLM 'burns' that happened due to restoration of the corrupted XLM balances.
+The fee pool is amended by adding `31879035` stroops in order to account for
+the XLM 'burns' that happened due to restoration of the corrupted XLM balances.
## Specification
### The list of the corrupted entries
-The full list of the corrupted entries (including those that had been restored) is attached to this repository in [corrupted_hot_archive_entries.csv](./../contents/cap-0076/corrupted_hot_archive_entries.csv) file.
+The full list of the corrupted entries (including those that had been restored)
+is attached to this repository in
+[corrupted_hot_archive_entries.csv](./../contents/cap-0076/corrupted_hot_archive_entries.csv)
+file.
The file contains 478 rows with 5 columns:
-- `ledger_key` - base64 encoded `LedgerKey` XDR, ledger key of the affected entry
-- `correct_entry` - base64 encoded `LedgerEntry` XDR, the correct value that should have been archived (i.e. the value of the affected entry at the moment when it has been archived)
-- `archived_entry` - base64 encoded `LedgerEntry` XDR, the actual corrupted value that got archived
-- `evicted_ledger_seq` - the ledger on which the eviction that caused corruption has occurred (for audit/validation purposes)
-- `restored_ledger_seq` - the ledger on which the corrupted state has occurred or `not-restored` for entries that have never been restored and thus are in scope of this CAP (for audit/validation purposes)
+- `ledger_key` - base64 encoded `LedgerKey` XDR, ledger key of the affected
+ entry
+- `correct_entry` - base64 encoded `LedgerEntry` XDR, the correct value that
+ should have been archived (i.e. the value of the affected entry at the moment
+ when it has been archived)
+- `archived_entry` - base64 encoded `LedgerEntry` XDR, the actual corrupted
+ value that got archived
+- `evicted_ledger_seq` - the ledger on which the eviction that caused
+ corruption has occurred (for audit/validation purposes)
+- `restored_ledger_seq` - the ledger on which the restoration has occurred,
+ or `not-restored` for entries that have never been restored and thus are in
+ scope of this CAP (for audit/validation purposes)
### Semantics
#### Hot archive entry amendment
-The entry amendments may strictly happen only for the entries that are still in the Hot Archive at the moment of protocol upgrade, and that still are in the exact state that has been observed at the moment of the corrupted archival.
-
-Specifically, for every `ledger_key` in [corrupted_hot_archive_entries](./../contents/cap-0076/corrupted_hot_archive_entries.csv) a check for an entry to be amendable will be performed. The check requires that all of the following conditions are true:
-
-- There is no entry corresponding to the `ledger_key` in the current live state
-- `ledger_key` is not being archived in the upgrade ledger
-- An entry `E` corresponding to the `ledger_key` exists in the Hot Archive
-- `E == archived_entry` (where `archived_entry` is the corresponding value from the corrupted_hot_archive_entries table)
-
-If the entry is amendable, then a new ledger entry with value ``correct_entry` - base64 encoded `LedgerEntry` XDR, the correct value that should have been archived (i.e. the value of the affected entry at the moment when it has been archived)
-` (from the table) will be written to the Hot Archive and thus become the most recent state of the entry that will be used for the restoration.
-
-Note, that since the amendment only occurs in the Hot Archive, the change will not be reflected in `LedgerCloseMeta`, as it can not contain Hot Archive changes. The change will only be observable due to the `bucketListHash` change in the upgrade ledger header. After the upgrade the amended changes will be observable at the moment of restoration (and they will have the state that matches the state they had prior to archival).
+The entry amendments may strictly happen only for the entries that are still in
+the Hot Archive at the moment of protocol upgrade, and that still are in the
+exact state that has been observed at the moment of the corrupted archival.
+
+Specifically, for every `ledger_key` in
+[corrupted_hot_archive_entries](./../contents/cap-0076/corrupted_hot_archive_entries.csv)
+a check for an entry to be amendable will be performed. The check requires that
+all of the following conditions are true:
+
+- There is no entry corresponding to the `ledger_key` in the current live state
+- `ledger_key` is not being archived in the upgrade ledger
+- An entry `E` corresponding to the `ledger_key` exists in the Hot Archive
+- `E == archived_entry` (where `archived_entry` is the corresponding value from
+ the corrupted_hot_archive_entries table)
+
+If the entry is amendable, then a new ledger entry with the value
+`correct_entry` (from the table) will be written to the Hot Archive and thus
+become the most recent state of the entry that will be used for the
+restoration. Recall that `correct_entry` is the base64 encoded `LedgerEntry`
+XDR of the correct value that should have been archived (i.e. the value of the
+affected entry at the moment when it has been archived).
+
+Note, that since the amendment only occurs in the Hot Archive, the change will
+not be reflected in `LedgerCloseMeta`, as it can not contain Hot Archive
+changes. The change will only be observable due to the `bucketListHash` change
+in the upgrade ledger header. After the upgrade the amended changes will be
+observable at the moment of restoration (and they will have the state that
+matches the state they had prior to archival).
#### Fee pool amendment
-`31879035` stroops will be added to the `feePool` in the `LedgerHeader`. This change reflects the XLM burn that occurred due to bug. Specifically, two XLM contract balances have been restored with a balance that is lower than the balance they had before archival. The specific XLM burns are as follows (these can also be verified in [corrupted_hot_archive_entries](./../contents/cap-0076/corrupted_hot_archive_entries.csv)):
+`31879035` stroops will be added to the `feePool` in the `LedgerHeader`. This
+change reflects the XLM burn that occurred due to bug. Specifically, two XLM
+contract balances have been restored with a balance that is lower than the
+balance they had before archival. The specific XLM burns are as follows (these
+can also be verified in
+[corrupted_hot_archive_entries](./../contents/cap-0076/corrupted_hot_archive_entries.csv)):
-- Contract `CAS3J7GYLGXMF6TDJBBYYSE3HQ6BBSMLNUQ34T6TZMYMW2EVH34XOWMA` had XLM balance of `291100005` stroops before archival, but had `290100005` stroops at the moment of restoration (`1000000` stroops burned)
-- Contract `CDLMAKG5TSJA6FGP7LLC2FKJRQW6DQYMEPP6FURFVULDEQMP3PRZ4ISI` had XLM balance of `1173140246` stroops before archival, but had `1142261211` stroops balance at the moment of restoration (`30879035` stroops burned)
+- Contract `CAS3J7GYLGXMF6TDJBBYYSE3HQ6BBSMLNUQ34T6TZMYMW2EVH34XOWMA` had XLM
+ balance of `291100005` stroops before archival, but had `290100005` stroops
+ at the moment of restoration (`1000000` stroops burned)
+- Contract `CDLMAKG5TSJA6FGP7LLC2FKJRQW6DQYMEPP6FURFVULDEQMP3PRZ4ISI` had XLM
+ balance of `1173140246` stroops before archival, but had `1142261211` stroops
+ balance at the moment of restoration (`30879035` stroops burned)
-Thus `1000000 + 30879035 = 31879035` stroops have been burned, which is reflected via `feePool`.
+Thus `1000000 + 30879035 = 31879035` stroops have been burned, which is
+reflected via `feePool`.
### Backwards Incompatibilities
@@ -92,20 +177,41 @@ This CAP does not introduce any backward incompatibilities.
### Resource Utilization
-The upgrade ledger will need to do a few hundred additional disk lookups, but the performance impact of that is negligible.
+The upgrade ledger will need to do a few hundred additional disk lookups, but
+the performance impact of that is negligible.
## Security Concerns
-As mentioned in the 'Motivation' section this CAP performs modification of the ledger state that is not directly owned by the validators, which comes with the inherent risk of malicious modifications, or non-malicious erroneous modifications that lead to the further state corruption that can then be abused by the attackers.
-
-In order to resolve these concerns to some degree, and in order to ensure that the amendments only bring back the entries to their valid state, the Stellar Core build that performs the upgrade will contain the tools that allow anyone to ensure that:
-- During the replay of protocol 23 *only* the entries from [corrupted_hot_archive_entries](./../contents/cap-0076/corrupted_hot_archive_entries.csv) table are incorrectly incorrectly archived, and that their correct and archived states match those in the table
-- Ensure that *every* entry from the table has indeed been incorrectly archived
-- Ensure that during the protocol upgrade only the entries from the table have been updated, and that the update has brought them back to the correct state
+As mentioned in the 'Motivation' section this CAP performs modification of the
+ledger state that is not directly owned by the validators, which comes with the
+inherent risk of malicious modifications, or non-malicious erroneous
+modifications that lead to the further state corruption that can then be abused
+by the attackers.
+
+In order to resolve these concerns to some degree, and in order to ensure that
+the amendments only bring back the entries to their valid state, the Stellar
+Core build that performs the upgrade will contain the tools that allow anyone
+to ensure that:
+
+- During the replay of protocol 23 _only_ the entries from
+ [corrupted_hot_archive_entries](./../contents/cap-0076/corrupted_hot_archive_entries.csv)
+  table are incorrectly archived, and that their correct and
+ archived states match those in the table
+- Ensure that _every_ entry from the table has indeed been incorrectly archived
+- Ensure that during the protocol upgrade only the entries from the table have
+ been updated, and that the update has brought them back to the correct state
## Test Cases
-The audit process described in the section above is implemented in Stellar Core. It will have an optional config called `PATH_TO_PROTOCOL_23_CORRUPTION_FILE`, which is the file path to the [corrupted_hot_archive_entries](./../contents/cap-0076/corrupted_hot_archive_entries.csv) file. When set, Stellar Core will add a series of asserts to verify the correctness of the file when replaying protocol 23 ledgers, as described in the Security Concerns section. In order to independently verify the correctness of the affected archived keys, set this config option and run catchup starting from the initial protocol 23 ledger, 58762517.
+The audit process described in the section above is implemented in Stellar
+Core. It will have an optional config called
+`PATH_TO_PROTOCOL_23_CORRUPTION_FILE`, which is the file path to the
+[corrupted_hot_archive_entries](./../contents/cap-0076/corrupted_hot_archive_entries.csv)
+file. When set, Stellar Core will add a series of asserts to verify the
+correctness of the file when replaying protocol 23 ledgers, as described in the
+Security Concerns section. In order to independently verify the correctness of
+the affected archived keys, set this config option and run catchup starting
+from the initial protocol 23 ledger, 58762517.
## Implementation
diff --git a/core/cap-0077.md b/core/cap-0077.md
index 03754e8d8..cabc3b7d4 100644
--- a/core/cap-0077.md
+++ b/core/cap-0077.md
@@ -4,7 +4,7 @@ Title: Ability to freeze ledger keys via network configuration
Working Group:
Owner: Dmytro Kozhevin <@dmkozh>
Authors: Dmytro Kozhevin <@dmkozh>
- Consulted:
+ Consulted:
Status: Draft
Created: 2025-11-25
Discussion: https://github.com/orgs/stellar/discussions/1811
@@ -13,7 +13,8 @@ Protocol version: 26
## Simple Summary
-Provide a way to make ledger keys inaccessible based on the network configuration upgrade performed with a validator vote.
+Provide a way to make ledger keys inaccessible based on the network
+configuration upgrade performed with a validator vote.
## Working Group
@@ -21,21 +22,43 @@ As specified in the Preamble.
## Motivation
-One of the first remediation steps for the data corruption incident that has occurred in protocol 23 has been the temporary 'freeze' of the corrupted ledger entries at the overlay layer in order to prevent further data corruption until the protocol has been fixed. Any transaction that has accessed any one of the corrupted keys (detectable via footprint) was rejected by the validators without being added to the mempool.
-
-This was a bespoke change that has been released in a separate Core build. The 'freeze' has only become fully active when every tier 1 validator has updated to the new build, and even then it was not 100% effective (e.g. a validator could roll back the build). Also notably this kind of change can only be easily done for the Soroban entries - it would be much trickier to do quickly in case if any classic accounts or trustlines have been corrupted for whatever reason.
-
-While the Stellar Core team puts a lot of effort to reduce the probability of the similar data corruption issues from ever occurring again, there is always a non-zero risk that something goes wrong. There also may be a possibility of using a similar mechanism for remediation of other issues beyond the protocol bugs, such as freezing the data entries that are known to be vulnerable in some way, e.g. are known to be hacked.
-
-If the mechanism for freezing the entries is implemented as a part of the protocol instead of an overlay-level filter, it will both be easier to enable (via consensus instead of relying on every validator picking up the settings), and also more transparent, as all the changes will be visible on-chain.
+One of the first remediation steps for the data corruption incident that has
+occurred in protocol 23 has been the temporary 'freeze' of the corrupted ledger
+entries at the overlay layer in order to prevent further data corruption until
+the protocol has been fixed. Any transaction that has accessed any one of the
+corrupted keys (detectable via footprint) was rejected by the validators
+without being added to the mempool.
+
+This was a bespoke change that has been released in a separate Core build. The
+'freeze' has only become fully active when every tier 1 validator has updated
+to the new build, and even then it was not 100% effective (e.g. a validator
+could roll back the build). Also notably this kind of change can only be easily
+done for the Soroban entries - it would be much trickier to do quickly in case
+any classic accounts or trustlines have been corrupted for whatever reason.
+
+While the Stellar Core team puts a lot of effort to reduce the probability of
+the similar data corruption issues from ever occurring again, there is always a
+non-zero risk that something goes wrong. There also may be a possibility of
+using a similar mechanism for remediation of other issues beyond the protocol
+bugs, such as freezing the data entries that are known to be vulnerable in some
+way, e.g. are known to be hacked.
+
+If the mechanism for freezing the entries is implemented as a part of the
+protocol instead of an overlay-level filter, it will both be easier to enable
+(via consensus instead of relying on every validator picking up the settings),
+and also more transparent, as all the changes will be visible on-chain.
### Goals Alignment
-This CAP is aligned with the goal of maintaining the Stellar network reliability via providing tools for quick issue remediation.
+This CAP is aligned with the goal of maintaining the Stellar network
+reliability via providing tools for quick issue remediation.
## Abstract
-This CAP introduces two new types of configuration settings: one for storing the full list of the frozen ledger keys and another one for performing incremental updates to that list via the standard settings upgrade mechanism ([CAP-46-06](./cap-0046-09.md)).
+This CAP introduces two new types of configuration settings: one for storing
+the full list of the frozen ledger keys and another one for performing
+incremental updates to that list via the standard settings upgrade mechanism
+([CAP-46-09](./cap-0046-09.md)).
Only a subset of ledger entry types can be frozen, specifically:
@@ -43,17 +66,30 @@ Only a subset of ledger entry types can be frozen, specifically:
- Account entries
- Trustline entires
-Soroban transactions that have a frozen entry in the footprint will be considered invalid, which would cause them to never be included into ledger. For the most of the 'classic' transactions it's possible to tell which account or trustline entries are going to be accessed and thus they will also be considered invalid and never included into ledger.
+Soroban transactions that have a frozen entry in the footprint will be
+considered invalid, which would cause them to never be included into ledger.
+For most of the 'classic' transactions it's possible to tell which account
+or trustline entries are going to be accessed and thus they will also be
+considered invalid and never included into ledger.
-For the transactions that use the opaque ids instead of the asset specification (claimable balance and liquidity pool related), the validation happens at apply time instead and results in transaction failure. For the transactions that interact with DEX there is no way to predict if a frozen trustline or account is going to be accessed and thus the offers that would modify a frozen entry are ignored and removed instead.
+For the transactions that use the opaque ids instead of the asset specification
+(claimable balance and liquidity pool related), the validation happens at apply
+time instead and results in transaction failure. For the transactions that
+interact with DEX there is no way to predict if a frozen trustline or account
+is going to be accessed and thus the offers that would modify a frozen entry
+are ignored and removed instead.
-There are also two exceptions for the access to the frozen entries: offers may be removed and update the frozen liabilities, and it's an entry that is sponsored by a frozen account entry may be removed and update the sponsorship counter.
+There are also two exceptions for the access to the frozen entries: offers may
+be removed and update the frozen liabilities, and an entry that is
+sponsored by a frozen account may be removed and update the sponsorship
+counter.
## Specification
### XDR changes
-This patch of XDR changes is based on the XDR files in commit `0a621ec7811db000a60efae5b35f78dee3aa2533` of stellar-xdr.
+This patch of XDR changes is based on the XDR files in commit
+`0a621ec7811db000a60efae5b35f78dee3aa2533` of stellar-xdr.
```diff mddiffcheck.ignore=true
diff --git a/Stellar-contract-config-setting.x b/Stellar-contract-config-setting.x
@@ -62,7 +98,7 @@ index 9a95937..927c9bf 100644
+++ b/Stellar-contract-config-setting.x
@@ -1,6 +1,9 @@
%#include "xdr/Stellar-types.h"
-
+
namespace stellar {
+
+typedef opaque EncodedLedgerKey<>;
@@ -73,7 +109,7 @@ index 9a95937..927c9bf 100644
@@ -341,6 +344,15 @@ struct ConfigSettingSCPTiming {
uint32 ballotTimeoutIncrementMilliseconds;
};
-
+
+struct FrozenLedgerKeys {
+ EncodedLedgerKey keys<>;
+};
@@ -85,7 +121,7 @@ index 9a95937..927c9bf 100644
+
// limits the ContractCostParams size to 20kB
const CONTRACT_COST_COUNT_LIMIT = 1024;
-
+
@@ -365,7 +377,9 @@ enum ConfigSettingID
CONFIG_SETTING_EVICTION_ITERATOR = 13,
CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0 = 14,
@@ -95,7 +131,7 @@ index 9a95937..927c9bf 100644
+ CONFIG_SETTING_FROZEN_LEDGER_KEYS = 17,
+ CONFIG_SETTING_FROZEN_LEDGER_KEYS_DELTA = 18
};
-
+
union ConfigSettingEntry switch (ConfigSettingID configSettingID)
@@ -404,5 +418,9 @@ case CONFIG_SETTING_CONTRACT_LEDGER_COST_EXT_V0:
ConfigSettingContractLedgerCostExtV0 contractLedgerCostExt;
@@ -118,7 +154,7 @@ index 9a14d6e..5adc510 100644
- CREATE_CLAIMABLE_BALANCE_UNDERFUNDED = -5
+ CREATE_CLAIMABLE_BALANCE_UNDERFUNDED = -5,
};
-
+
union CreateClaimableBalanceResult switch (
@@ -1597,7 +1597,8 @@ enum ClaimClaimableBalanceResultCode
CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM = -2,
@@ -128,7 +164,7 @@ index 9a14d6e..5adc510 100644
+ CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED = -5,
+ CLAIM_CLAIMABLE_BALANCE_TRUSTLINE_FROZEN = -6
};
-
+
union ClaimClaimableBalanceResult switch (ClaimClaimableBalanceResultCode code)
@@ -1609,6 +1610,7 @@ case CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM:
case CLAIM_CLAIMABLE_BALANCE_LINE_FULL:
@@ -137,17 +173,17 @@ index 9a14d6e..5adc510 100644
+case CLAIM_CLAIMABLE_BALANCE_TRUSTLINE_FROZEN:
void;
};
-
+
@@ -1778,7 +1780,9 @@ enum LiquidityPoolDepositResultCode
LIQUIDITY_POOL_DEPOSIT_LINE_FULL = -5, // pool share trust line doesn't
// have sufficient limit
LIQUIDITY_POOL_DEPOSIT_BAD_PRICE = -6, // deposit price outside bounds
- LIQUIDITY_POOL_DEPOSIT_POOL_FULL = -7 // pool reserves are full
+ LIQUIDITY_POOL_DEPOSIT_POOL_FULL = -7, // pool reserves are full
-+ LIQUIDITY_POOL_DEPOSIT_TRUSTLINE_FROZEN = -8 // trustline for one of the
++ LIQUIDITY_POOL_DEPOSIT_TRUSTLINE_FROZEN = -8 // trustline for one of the
+ // assets is frozen
};
-
+
union LiquidityPoolDepositResult switch (LiquidityPoolDepositResultCode code)
@@ -1792,6 +1796,7 @@ case LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED:
case LIQUIDITY_POOL_DEPOSIT_LINE_FULL:
@@ -156,17 +192,17 @@ index 9a14d6e..5adc510 100644
+case LIQUIDITY_POOL_DEPOSIT_TRUSTLINE_FROZEN:
void;
};
-
+
@@ -1810,7 +1815,9 @@ enum LiquidityPoolWithdrawResultCode
// pool share
LIQUIDITY_POOL_WITHDRAW_LINE_FULL = -4, // would go above limit for one
// of the assets
- LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM = -5 // didn't withdraw enough
+ LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM = -5, // didn't withdraw enough
-+ LIQUIDITY_POOL_WITHDRAW_TRUSTLINE_FROZEN = -6 // trustline for one of the
++ LIQUIDITY_POOL_WITHDRAW_TRUSTLINE_FROZEN = -6 // trustline for one of the
+ // assets is frozen
};
-
+
union LiquidityPoolWithdrawResult switch (LiquidityPoolWithdrawResultCode code)
@@ -1822,6 +1829,7 @@ case LIQUIDITY_POOL_WITHDRAW_NO_TRUST:
case LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED:
@@ -175,7 +211,7 @@ index 9a14d6e..5adc510 100644
+case LIQUIDITY_POOL_WITHDRAW_TRUSTLINE_FROZEN:
void;
};
-
+
@@ -1999,7 +2007,8 @@ enum TransactionResultCode
txBAD_SPONSORSHIP = -14, // sponsorship not confirmed
txBAD_MIN_SEQ_AGE_OR_GAP = -15, // minSeqAge or minSeqLedgerGap conditions not met
@@ -184,7 +220,7 @@ index 9a14d6e..5adc510 100644
+ txSOROBAN_INVALID = -17, // soroban-specific preconditions were not met
+ txFROZEN_KEY_ACCESSED = -18 // a 'frozen' ledger key is accessed by any operation
};
-
+
// InnerTransactionResult must be binary compatible with TransactionResult
```
@@ -192,33 +228,64 @@ index 9a14d6e..5adc510 100644
#### Configuration settings
-Two new configuration settings types are introduced: `CONFIG_SETTING_FROZEN_LEDGER_KEYS` (`frozenLedgerKeys` setting) and `CONFIG_SETTING_FROZEN_LEDGER_KEYS_DELTA` (`frozenLedgerKeysDelta` setting). On protocol upgrade only `CONFIG_SETTING_FROZEN_LEDGER_KEYS` entry is created and initialized with an empty array.
-
-`frozenLedgerKeys` setting contains a vector of the `LedgerKey` XDR structs encoded as bytes (in order to avoid a circular dependency in XDR definitions). The ledger keys stored in this setting are considered frozen by the protocol, with detailed semantics described in the following section.
-
-`frozenLedgerKeys` can not be upgraded by the regular setting upgrade mechanism that involves overriding the full contents of the configuration entry. Instead, the upgrade is performed via a `frozenLedgerKeysDelta` setting that contains vectors of `LedgerKey`s to freeze and unfreeze, encoded as XDR bytes.
-
-`frozenLedgerKeysDelta` upgrade is considered invalid if any of the ledger keys can't be decoded as `LedgerKey` XDR, or if any of the keys doesn't belong to the following allowed types: `ACCOUNT`, `TRUSTLINE`, `CONTRACT_DATA`, `CONTRACT_CODE`.
-
-When an upgrade to that contains `frozenLedgerKeysDelta` setting is applied to the ledger, all the keys in `keysToFreeze` are encoded and added to `frozenLedgerKeys` and all the keys in `keysToUnfreeze` are removed from `frozenLedgerKeys`. The upgrade process gracefully handles the addition of duplicate keys and removal of non-existent keys, these are simply ignored during the upgrade application process.
+Two new configuration settings types are introduced:
+`CONFIG_SETTING_FROZEN_LEDGER_KEYS` (`frozenLedgerKeys` setting) and
+`CONFIG_SETTING_FROZEN_LEDGER_KEYS_DELTA` (`frozenLedgerKeysDelta` setting). On
+protocol upgrade only `CONFIG_SETTING_FROZEN_LEDGER_KEYS` entry is created and
+initialized with an empty array.
+
+`frozenLedgerKeys` setting contains a vector of the `LedgerKey` XDR structs
+encoded as bytes (in order to avoid a circular dependency in XDR definitions).
+The ledger keys stored in this setting are considered frozen by the protocol,
+with detailed semantics described in the following section.
+
+`frozenLedgerKeys` can not be upgraded by the regular setting upgrade mechanism
+that involves overriding the full contents of the configuration entry. Instead,
+the upgrade is performed via a `frozenLedgerKeysDelta` setting that contains
+vectors of `LedgerKey`s to freeze and unfreeze, encoded as XDR bytes.
+
+`frozenLedgerKeysDelta` upgrade is considered invalid if any of the ledger keys
+can't be decoded as `LedgerKey` XDR, or if any of the keys doesn't belong to
+the following allowed types: `ACCOUNT`, `TRUSTLINE`, `CONTRACT_DATA`,
+`CONTRACT_CODE`.
+
+When an upgrade that contains the `frozenLedgerKeysDelta` setting is applied to
+the ledger, all the keys in `keysToFreeze` are encoded and added to
+`frozenLedgerKeys` and all the keys in `keysToUnfreeze` are removed from
+`frozenLedgerKeys`. The upgrade process gracefully handles the addition of
+duplicate keys and removal of non-existent keys, these are simply ignored
+during the upgrade application process.
#### Frozen key validation
-In general, the transaction that would access a frozen key should be rejected by the network as soon as possible, i.e. at the validation stage. If it's possible to tell if a transaction or one of its operation accesses a frozen key, it is rejected as invalid with `txFROZEN_KEY_ACCESSED` error. Additional details may be provided via the diagnostic events.
+In general, the transaction that would access a frozen key should be rejected
+by the network as soon as possible, i.e. at the validation stage. If it's
+possible to tell if a transaction or one of its operations accesses a frozen
+key, it is rejected as invalid with `txFROZEN_KEY_ACCESSED` error. Additional
+details may be provided via the diagnostic events.
This section contains details for all the supported validation scenarios.
##### Soroban transactions
-Soroban transactions, i.e. those transactions that have `SorobanTransactionData` with resources are considered invalid if any of the keys in either `readOnly`, or `readWrite` footprint are present in the `frozenLedgerKeys` setting. Currently, the Soroban transactions are those that contain a single operation among `InvokeHostFunctionOp`, `ExtendFootprintTTLOp` and `RestoreFootprintOp`.
+Soroban transactions, i.e. those transactions that have
+`SorobanTransactionData` with resources are considered invalid if any of the
+keys in either `readOnly`, or `readWrite` footprint are present in the
+`frozenLedgerKeys` setting. Currently, the Soroban transactions are those that
+contain a single operation among `InvokeHostFunctionOp`, `ExtendFootprintTTLOp`
+and `RestoreFootprintOp`.
##### Source accounts
-If a source account of any transaction, fee bump transaction, or operation is in `frozenLedgerKeys`, then the transaction is considered to be invalid.
+If a source account of any transaction, fee bump transaction, or operation is
+in `frozenLedgerKeys`, then the transaction is considered to be invalid.
##### Source trustlines
-A number of operations modifies the trustline of the source account. The trustline is defined by the source account and the respective `Asset`(s) specified in the operation. If a trustline is frozen, the transaction is considered to be invalid.
+A number of operations modify the trustline of the source account. The
+trustline is defined by the source account and the respective `Asset`(s)
+specified in the operation. If a trustline is frozen, the transaction is
+considered to be invalid.
The list of operations that explicitly specify source trustline(s):
@@ -233,7 +300,11 @@ The list of operations that explicitly specify source trustline(s):
##### Destination accounts/trustlines
-A number of operations specifies the destination trustline or account, typically via a `MuxedAccount`/`AccountID` identifier and the corresponding `Asset` asset. These unambiguously identify either a trustline, or an account entry (in case of the `NATIVE` asset). If such account or trustline is in the `frozenLedgerKeys`, then the transactions is considered to be invalid.
+A number of operations specify the destination trustline or account,
+typically via a `MuxedAccount`/`AccountID` identifier and the corresponding
+`Asset` asset. These unambiguously identify either a trustline, or an account
+entry (in case of the `NATIVE` asset). If such account or trustline is in the
+`frozenLedgerKeys`, then the transaction is considered to be invalid.
The list of the operations that explicitly specify the payment destination:
@@ -241,49 +312,99 @@ The list of the operations that explicitly specify the payment destination:
- `PathPaymentStrictReceiveOp`
- `PathPaymentStrictSendOp`
- `AllowTrustOp` (`trustor` + `asset` define a trustline)
-- `RevokeSponsorshipOp` (destination key is directly defined via `LedgerKey`, or an account entry is specified for `REVOKE_SPONSORSHIP_SIGNER`)
+- `RevokeSponsorshipOp` (destination key is directly defined via `LedgerKey`,
+ or an account entry is specified for `REVOKE_SPONSORSHIP_SIGNER`)
- `ClawbackOp` (`from` + `asset` define a trustline)
- `SetTrustLineFlagsOp` (`trustor` + `asset` define a trustline)
#### Apply time validation
-Some operations don't contain the information necessary to identify the trustlines or accounts being modified. For these operations the frozen keys are handled at apply time.
+Some operations don't contain the information necessary to identify the
+trustlines or accounts being modified. For these operations the frozen keys are
+handled at apply time.
##### Operations with opaque source/destination
-The claimable balance and liquidity pool operations use opaque identifiers instead of the asset codes. If one of these operations modifies a frozen trustline or an account (for the XLM balance modification), then the operation will fail with the respective error.
-
-- `ClaimClaimableBalanceOp` - fails with `CLAIM_CLAIMABLE_BALANCE_TRUSTLINE_FROZEN` error if balance is withdrawn to a frozen trustline or account
-- `LiquidityPoolDepositOp` - fails with `LIQUIDITY_POOL_DEPOSIT_TRUSTLINE_FROZEN` error if either of the trustlines used for deposit is frozen, or if the pool share trustline is frozen
-- `LiquidityPoolWithdrawOp` - fails with `LIQUIDITY_POOL_WITHDRAW_TRUSTLINE_FROZEN` error if either of the trustlines used for withdrawal is frozen, or if the pool share trustline is frozen
+The claimable balance and liquidity pool operations use opaque identifiers
+instead of the asset codes. If one of these operations modifies a frozen
+trustline or an account (for the XLM balance modification), then the operation
+will fail with the respective error.
+
+- `ClaimClaimableBalanceOp` - fails with
+ `CLAIM_CLAIMABLE_BALANCE_TRUSTLINE_FROZEN` error if balance is withdrawn to a
+ frozen trustline or account
+- `LiquidityPoolDepositOp` - fails with
+ `LIQUIDITY_POOL_DEPOSIT_TRUSTLINE_FROZEN` error if either of the trustlines
+ used for deposit is frozen, or if the pool share trustline is frozen
+- `LiquidityPoolWithdrawOp` - fails with
+ `LIQUIDITY_POOL_WITHDRAW_TRUSTLINE_FROZEN` error if either of the trustlines
+ used for withdrawal is frozen, or if the pool share trustline is frozen
##### DEX operations
-A DEX offer may be traded when a DEX operation is applied (these operations are `PathPaymentStrictReceiveOp`, `ManageSellOfferOp`, `CreatePassiveSellOfferOp`, `ManageBuyOfferOp`, `PathPaymentStrictSendOp`). When DEX tries to cross an offer that would cause a frozen trustline or account balance change, the offer will be removed instead without moving any assets, and the offer matching will then proceed as usual. Note, that the transactions that have an explicit frozen source or destination defined in the transaction would have already been filtered out at this point, so only the trustline/account owning the offer found by DEX are verified at this stage.
+A DEX offer may be traded when a DEX operation is applied (these operations are
+`PathPaymentStrictReceiveOp`, `ManageSellOfferOp`, `CreatePassiveSellOfferOp`,
+`ManageBuyOfferOp`, `PathPaymentStrictSendOp`). When DEX tries to cross an
+offer that would cause a frozen trustline or account balance change, the offer
+will be removed instead without moving any assets, and the offer matching will
+then proceed as usual. Note, that the transactions that have an explicit frozen
+source or destination defined in the transaction would have already been
+filtered out at this point, so only the trustline/account owning the offer
+found by DEX is verified at this stage.
-The offer removal also results in adjusting the liabilities of the trustline or an account, even though it is frozen. This is one of the few changes allowed to perform for the frozen entries.
+The offer removal also results in adjusting the liabilities of the trustline or
+an account, even though it is frozen. This is one of the few changes allowed to
+perform for the frozen entries.
##### Deletion of sponsored entry
-If an account is frozen, an arbitrary number of sponsored entries might implicitly depend on it. The protocol allows doing that as per the standard sponsorship removal procedure, i.e. the `numSponsoring` field of a frozen account may be still modified when an entry or a signer sponsored by a frozen account is removed.
+If an account is frozen, an arbitrary number of sponsored entries might
+implicitly depend on it. The protocol allows doing that as per the standard
+sponsorship removal procedure, i.e. the `numSponsoring` field of a frozen
+account may be still modified when an entry or a signer sponsored by a frozen
+account is removed.
## Design Rationale
### `CONFIG_SETTING_FROZEN_LEDGER_KEYS` config upgrade
-The bespoke upgrade procedure is introduced in order to work around the limit for the maximum contract data entry size. The entry size limit currently is 128 KB, so if the regular upgrade process was used, then only about 1500 keys may be stored in the setting depending on the key sizes. This might be too limiting and thus the mechanism for incremental updates is introduced.
+The bespoke upgrade procedure is introduced in order to work around the limit
+for the maximum contract data entry size. The entry size limit currently is 128
+KB, so if the regular upgrade process was used, then only about 1500 keys may
+be stored in the setting depending on the key sizes. This might be too limiting
+and thus the mechanism for incremental updates is introduced.
-The validation of the incremental upgrades is also relaxed (duplicates/missing entries are allowed) in order to make the upgrade process faster and more reliable, as the assumption is that if this mechanism is ever used, it needs to be used rather fast. Under that requirement it's better to miss updates to a few keys instead of missing the whole upgrade.
+The validation of the incremental upgrades is also relaxed (duplicates/missing
+entries are allowed) in order to make the upgrade process faster and more
+reliable, as the assumption is that if this mechanism is ever used, it needs to
+be used rather fast. Under that requirement it's better to miss updates to a
+few keys instead of missing the whole upgrade.
### DEX handling
-A simple approach to handling the DEX operations would be to fail the operation if an offer that would update a frozen account/trustline is crossed. However, since DEX greedily picks the best offer to cross, this approach might effectively disable DEX for an asset pair for an unknown period of time and block all the more offers that are more expensive than the affected offer.
-
-In order to avoid DEX disruption the approach described in this CAP is proposed: the offer is automatically removed and liabilities are released. This comes with the minimal DEX disruption with a tradeoff of modifying a frozen entry. However, the modification is minimal (only the liabilities are modified, the actual balance stays the same) and thus is unlikely to make data corruption worse. In the worst case, if an entry is in an arbitrarily broken state and update of liabilities would violate the protocol invariants for the liabilities, the transaction will fail with an internal error, which is equivalent to what would unconditionally happen in the 'simple' approach.
+A simple approach to handling the DEX operations would be to fail the operation
+if an offer that would update a frozen account/trustline is crossed. However,
+since DEX greedily picks the best offer to cross, this approach might
+effectively disable DEX for an asset pair for an unknown period of time and
+block all the more offers that are more expensive than the affected offer.
+
+In order to avoid DEX disruption the approach described in this CAP is
+proposed: the offer is automatically removed and liabilities are released. This
+comes with the minimal DEX disruption with a tradeoff of modifying a frozen
+entry. However, the modification is minimal (only the liabilities are modified,
+the actual balance stays the same) and thus is unlikely to make data corruption
+worse. In the worst case, if an entry is in an arbitrarily broken state and
+update of liabilities would violate the protocol invariants for the
+liabilities, the transaction will fail with an internal error, which is
+equivalent to what would unconditionally happen in the 'simple' approach.
### Sponsorship handling
-Similarly to the DEX handling, the proposal here is a tradeoff that priorities disruption minimization over complete access blocking. The sponsored entries have pretty much nothing to do with a frozen account, and decreasing the `numSponsored` value is a very small and likely safe modification that shouldn't be able to make data corruption issues worse.
+Similarly to the DEX handling, the proposal here is a tradeoff that prioritizes
+disruption minimization over complete access blocking. The sponsored entries
+have pretty much nothing to do with a frozen account, and decreasing the
+`numSponsoring` value is a very small and likely safe modification that
+shouldn't be able to make data corruption issues worse.
## Protocol Upgrade Transition
@@ -293,13 +414,27 @@ This CAP does not introduce any backward incompatibilities.
### Resource Utilization
-If the frozen key list is too large, then it might slow down the transaction validation and execution of some operations, but it's unlikely that this would be ever noticeable in practice if a hash map is used for lookup. While the list is empty/small, there should be no noticeable performance impact at all.
+If the frozen key list is too large, then it might slow down the transaction
+validation and execution of some operations, but it's unlikely that this would
+be ever noticeable in practice if a hash map is used for lookup. While the list
+is empty/small, there should be no noticeable performance impact at all.
## Security Concerns
-Every Stellar validator has an inherent ability to censor the traffic, that doesn't require a consensus of validators to agree on that, and that is also pretty hard to observe. The 'censorship' mechanism introduced in this CAP is easily observable and requires consensus of validators and thus it is not providing any new risks or attack angles.
-
-On the other hand, the observability may be problematic if the freezing mechanism has to be used on a short notice (for example, before someone could abuse a data corruption bug). A malicious party may be able to observe the upgrade and try to abuse it before the upgrade goes through. However, that requires constant monitoring of all the Soroban state changes combined with an ability to react to the upgrade quickly. This impact of this issue can be reduced greatly by reducing the timeline between the upload of the config upgrade entry and the vote.
+Every Stellar validator has an inherent ability to censor the traffic, that
+doesn't require a consensus of validators to agree on that, and that is also
+pretty hard to observe. The 'censorship' mechanism introduced in this CAP is
+easily observable and requires consensus of validators and thus it is not
+providing any new risks or attack angles.
+
+On the other hand, the observability may be problematic if the freezing
+mechanism has to be used on a short notice (for example, before someone could
+abuse a data corruption bug). A malicious party may be able to observe the
+upgrade and try to abuse it before the upgrade goes through. However, that
+requires constant monitoring of all the Soroban state changes combined with an
+ability to react to the upgrade quickly. The impact of this issue can be
+reduced greatly by reducing the timeline between the upload of the config
+upgrade entry and the vote.
## Test Cases
diff --git a/core/cap-0078.md b/core/cap-0078.md
index 3d79c657c..eafa0286a 100644
--- a/core/cap-0078.md
+++ b/core/cap-0078.md
@@ -4,7 +4,7 @@ Title: Host functions for performing limited TTL extensions
Working Group:
Owner: Dmytro Kozhevin <@dmkozh>
Authors: Dmytro Kozhevin <@dmkozh>
- Consulted:
+ Consulted:
Status: Draft
Created: 2025-12-16
Discussion: https://github.com/orgs/stellar/discussions/1825
@@ -13,7 +13,8 @@ Protocol version: 26
## Simple Summary
-Add a way to limit the maximum TTL extension for contract data and code entries.
+Add a way to limit the maximum TTL extension for contract data and code
+entries.
## Working Group
@@ -21,25 +22,45 @@ As specified in the Preamble.
## Motivation
-The existing TTL extension host functions (such as `extend_contract_data_ttl`, `extend_contract_instance_and_code_ttl`) provide a way to extend contract data or code entry TTL by specifying the new value of TTL that the entry has to have and a minimum TTL extension threshold for reducing the frequency of TTL extensions in case if the function is called often. This interface allows developers to specify TTL extension policies such as 'if an entry TTL is less than 29 days, extend it to have 30 days TTL'.
-
-While the current approach allows distributing the fees among the contract users to some degree, in case if contract usage patterns are uneven, some users may end up paying much more than the others. For example, with the policy example above a user may end up paying for TTL extension anywhere between 1 day and 30 days. More fine-grained control over TTL extension would help the developers that want to prioritize the rent fee stability and fairness.
+The existing TTL extension host functions (such as `extend_contract_data_ttl`,
+`extend_contract_instance_and_code_ttl`) provide a way to extend contract data
+or code entry TTL by specifying the new value of TTL that the entry has to have
+and a minimum TTL extension threshold for reducing the frequency of TTL
+extensions in case if the function is called often. This interface allows
+developers to specify TTL extension policies such as 'if an entry TTL is less
+than 29 days, extend it to have 30 days TTL'.
+
+While the current approach allows distributing the fees among the contract
+users to some degree, in case if contract usage patterns are uneven, some users
+may end up paying much more than the others. For example, with the policy
+example above a user may end up paying for TTL extension anywhere between 1 day
+and 30 days. More fine-grained control over TTL extension would help the
+developers that want to prioritize the rent fee stability and fairness.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
-New host functions `extend_contract_data_ttl_v2` and `extend_contract_instance_and_code_ttl_v2` are introduced for extending the contract data and contract instance and/or code TTLs. The new host functions are similar to the old versions as they allows extending TTL of a contract data or code entry to the target value. Unlike the existing functions, these additionally provide an explicit way to specify the minimum TTL extension (instead of the `threshold` parameter in the old interface), and the maximum TTL extension (not available in the old interface).
+New host functions `extend_contract_data_ttl_v2` and
+`extend_contract_instance_and_code_ttl_v2` are introduced for extending the
+contract data and contract instance and/or code TTLs. The new host functions
+are similar to the old versions as they allow extending TTL of a contract data
+or code entry to the target value. Unlike the existing functions, these
+additionally provide an explicit way to specify the minimum TTL extension
+(instead of the `threshold` parameter in the old interface), and the maximum
+TTL extension (not available in the old interface).
## Specification
### New host functions
-The diff is based on commit `30ab5d1e2a642f18f3ae94cdb3a2798c3123f049` of `rs-soroban-env`.
+The diff is based on commit `30ab5d1e2a642f18f3ae94cdb3a2798c3123f049` of
+`rs-soroban-env`.
```diff mddiffcheck.ignore=true
diff --git a/soroban-env-common/env.json b/soroban-env-common/env.json
@@ -62,7 +83,7 @@ index 945ada2c..96efb4c8 100644
+ {
+ "name": "t",
+ "type": "StorageType"
-+ },
++ },
+ {
+ "name": "extend_to",
+ "type": "U32Val"
@@ -70,7 +91,7 @@ index 945ada2c..96efb4c8 100644
+ {
+ "name": "min_extension",
+ "type": "U32Val"
-+ },
++ },
+ {
+ "name": "max_extension",
+ "type": "U32Val"
@@ -95,11 +116,11 @@ index 945ada2c..96efb4c8 100644
+ {
+ "name": "extend_to",
+ "type": "U32Val"
-+ },
++ },
+ {
+ "name": "min_extension",
+ "type": "U32Val"
-+ },
++ },
+ {
+ "name": "max_extension",
+ "type": "U32Val"
@@ -117,7 +138,7 @@ index d72886e3..3f2d0d48 100644
+++ b/soroban-env-common/src/storage_type.rs
@@ -32,3 +32,13 @@ impl TryFrom for ContractDataDurability {
}
-
+
declare_wasmi_marshal_for_enum!(StorageType);
+
+
@@ -135,65 +156,133 @@ index d72886e3..3f2d0d48 100644
#### `extend_contract_data_ttl_v2` host function
-`extend_contract_data_ttl_v2` adds 0 or more ledgers to the `liveUntilLedgerSeq` of the contract data entry defined by the ID of the current contract, key `k` passed in the argument and storage type `t`. The computation of TTL extension involves the following definitions:
-
-- `liveUntilLedgerSeq` is the last ledger sequence number for which the entry is still considered to be alive, after that it is considered expired.
-- TTL is defined as `TTL = liveUntilLedgerSeq - currentLedgerSeq`, where `currentLedgerSeq` is the sequence number of the ledger where transaction is executed.
-- TTL extension is defined as `TTL_ext = TTL_new - TTL_curr = liveUntilLedgerSeq_new - liveUntilLedgerSeq_curr`, where `_curr/_new` are the values before and after executing the function respectively.
+`extend_contract_data_ttl_v2` adds 0 or more ledgers to the
+`liveUntilLedgerSeq` of the contract data entry defined by the ID of the
+current contract, key `k` passed in the argument and storage type `t`. The
+computation of TTL extension involves the following definitions:
+
+- `liveUntilLedgerSeq` is the last ledger sequence number for which the entry
+ is still considered to be alive, after that it is considered expired.
+- TTL is defined as `TTL = liveUntilLedgerSeq - currentLedgerSeq`, where
+ `currentLedgerSeq` is the sequence number of the ledger where transaction is
+ executed.
+- TTL extension is defined as
+ `TTL_ext = TTL_new - TTL_curr = liveUntilLedgerSeq_new - liveUntilLedgerSeq_curr`,
+ where `_curr/_new` are the values before and after executing the function
+ respectively.
- `maxEntryTTL` is a State Archival network setting.
With these definitions, the extension algorithm is defined as follows:
-- If `t` is `StorageType::Instance`, the function traps
+- If `t` is `StorageType::Instance`, the function traps
- If `max_extension < min_extension`, the function traps
-- If `extend_to <= TTL_curr` the function returns without performing any changes
+- If `extend_to <= TTL_curr`, the function returns without performing any
+ changes
- Compute the initial TTL extension `TTL_ext_init = extend_to - TTL_curr`
-- Compute maximum extension allowed by the network `max_network_extension = maxEntryTTL - TTL_curr`
-- If storage type `t` is `Temporary`, and `TTL_ext_init > max_network_extension`, the function traps
-- Clamp the initial extension to not exceed network/argument limits: `TTL_ext_final = min(TTL_ext_init, max_extension, max_network_extension)`
-- If `TTL_ext_final < min_extension`, the function returns without performing any changes
-- `liveUntilLedgerSeq` of the entry is set to be `liveUntilLedgerSeq + TTL_ext_final`
+- Compute maximum extension allowed by the network
+ `max_network_extension = maxEntryTTL - TTL_curr`
+- If storage type `t` is `Temporary`, and
+ `TTL_ext_init > max_network_extension`, the function traps
+- Clamp the initial extension to not exceed network/argument limits:
+ `TTL_ext_final = min(TTL_ext_init, max_extension, max_network_extension)`
+- If `TTL_ext_final < min_extension`, the function returns without performing
+ any changes
+- `liveUntilLedgerSeq` of the entry is set to be
+ `liveUntilLedgerSeq + TTL_ext_final`
#### `extend_contract_instance_and_code_ttl_v2` host function
-`extend_contract_instance_and_code_ttl_v2` TTL extension semantics are the same as for `extend_contract_data_ttl_v2`. The only difference is how the ledger keys to extend are specified.
-`contract` argument identifies the contract ID associated with the ledger entries. `extension_scope` is an enum argument that identifies whether `contract`'s instance (`ContractTTLExtension::Instance`), code (`ContractTTLExtension::Code`), or both (`ContractTTLExtension::InstanceAndCode`) will get extended.
+`extend_contract_instance_and_code_ttl_v2` TTL extension semantics are the same
+as for `extend_contract_data_ttl_v2`. The only difference is how the ledger
+keys to extend are specified. `contract` argument identifies the contract ID
+associated with the ledger entries. `extension_scope` is an enum argument that
+identifies whether `contract`'s instance (`ContractTTLExtension::Instance`),
+code (`ContractTTLExtension::Code`), or both
+(`ContractTTLExtension::InstanceAndCode`) will get extended.
-If a built-in contract instance is being extended (currently, only Stellar Asset contract is built-in), then the code extension requests are ignored without raising an error.
+If a built-in contract instance is being extended (currently, only Stellar
+Asset contract is built-in), then the code extension requests are ignored
+without raising an error.
## Design Rationale
### Minimum and maximum extension arguments
-Minimum extension argument replaces `threshold` argument from the old TTL extension functions. Its role is still to reduce the extension frequency, which reduces the user fees (as every TTL update incurs the write fee) and the ledger write load. However, unlike `threshold`, `min_extension` specifies the minimum number of ledgers to *extend* the entry TTL (i.e. add to its TTL), and not the absolute TTL threshold that must be crossed. This is done for the sake of consistency with maximum extension, and provides arguably more clear way of managing the TTL extension policy. For example, a typical way to use the `threshold` is to prevent extensions that are shorter than e.g. one day. With `threshold` parameters developers needed to compute the `threshold` as `extend_to - 1 day in ledgers`. With `min_extension` parameter `1 day in ledgers` is passed explicitly as `min_extension`, and there is no dependency on the extension target.
-
-Maximum extension argument addresses the extension strategy mentioned in the 'Motivation' section. Developers may set extension strategies like 'extend TTL to 30 days with min extension of 1 day and max extension of 1 day', which would result in any user extending the entry TTL by just 1 day as long as its current TTL is anywhere between 0 and 29 days.
+Minimum extension argument replaces `threshold` argument from the old TTL
+extension functions. Its role is still to reduce the extension frequency, which
+reduces the user fees (as every TTL update incurs the write fee) and the ledger
+write load. However, unlike `threshold`, `min_extension` specifies the minimum
+number of ledgers to _extend_ the entry TTL (i.e. add to its TTL), and not the
+absolute TTL threshold that must be crossed. This is done for the sake of
+consistency with maximum extension, and provides an arguably clearer way of
+managing the TTL extension policy. For example, a typical way to use the
+`threshold` is to prevent extensions that are shorter than e.g. one day. With
+`threshold` parameters developers needed to compute the `threshold` as
+`extend_to - 1 day in ledgers`. With `min_extension` parameter
+`1 day in ledgers` is passed explicitly as `min_extension`, and there is no
+dependency on the extension target.
+
+Maximum extension argument addresses the extension strategy mentioned in the
+'Motivation' section. Developers may set extension strategies like 'extend TTL
+to 30 days with min extension of 1 day and max extension of 1 day', which would
+result in any user extending the entry TTL by just 1 day as long as its current
+TTL is anywhere between 0 and 29 days.
### Impact of maximum extension
-Extension strategies that rely on `max_extension` may result in relatively more frequent updates of the TTL entries. For example, with the strategy described in the previous section up to 30 extensions may happen subsequently if the entry TTL has almost expired, while without `max_extension` only 1 extension would happen. However, for the reasonable strategies the absolute difference is not too significant compared to the overall scale of ledger writes, and it's already possible to create spammy strategies even without utilizing `max_extension` by just setting the `min_extension` threshold to 1 ledger.
+Extension strategies that rely on `max_extension` may result in relatively more
+frequent updates of the TTL entries. For example, with the strategy described
+in the previous section up to 30 extensions may happen subsequently if the
+entry TTL has almost expired, while without `max_extension` only 1 extension
+would happen. However, for reasonable strategies the absolute difference is
+not too significant compared to the overall scale of ledger writes, and it's
+already possible to create spammy strategies even without utilizing
+`max_extension` by just setting the `min_extension` threshold to 1 ledger.
### `max_extension` for temporary entries
-For most of the use cases setting `max_extension` less than `extend_to` for temporary entry would be a mistake, as typically lifetime of temporary entries is very sensitive and must be set precisely (for example, temporary nonce entries must not be archived until the respective signature has expired). Protocol could make setting `max_extension` lower than necessary to reach `extend_to` an error, but there may be a small fraction of use cases where the ability to use lower `max_extension` is actually desired. This is consistent with how `threshold` is treated for the old TTL extension functions: an extension may be skipped for certain `threshold` values.
-
-The justification for failing when the *network* limit is exceeded is that the network limit might change and contract generally can't know about that, which is why it conservatively aborts execution. However, in case of the user-provided arguments there is less motivation for the protocol to make assumptions on behalf of the users.
-
-SDK harness may be provided to reduce possibility of an error when using extension for the temporary entries.
+For most of the use cases setting `max_extension` less than `extend_to` for
+temporary entry would be a mistake, as typically lifetime of temporary entries
+is very sensitive and must be set precisely (for example, temporary nonce
+entries must not be archived until the respective signature has expired).
+Protocol could make setting `max_extension` lower than necessary to reach
+`extend_to` an error, but there may be a small fraction of use cases where the
+ability to use lower `max_extension` is actually desired. This is consistent
+with how `threshold` is treated for the old TTL extension functions: an
+extension may be skipped for certain `threshold` values.
+
+The justification for failing when the _network_ limit is exceeded is that the
+network limit might change and contract generally can't know about that, which
+is why it conservatively aborts execution. However, in the case of
+user-provided arguments there is less motivation for the protocol to make
+assumptions on behalf of the users.
+
+An SDK harness may be provided to reduce the possibility of an error when
+using extension for the temporary entries.
### `extend_contract_instance_and_code_ttl_v2` using enum to identify extension
-The protocol has already accumulated several functions for extending both contract code and instance, and one of code and instance separately. In order to limit the host interface bloat and also to reduce the number of the necessary host function imports, all these variants were condensed into a single function with an additional enum argument to specify the scope of the extension.
+The protocol has already accumulated several functions for extending both
+contract code and instance, and one of code and instance separately. In order
+to limit the host interface bloat and also to reduce the number of the
+necessary host function imports, all these variants were condensed into a
+single function with an additional enum argument to specify the scope of the
+extension.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-This CAP does not introduce any backward incompatibilities. The existing TTL extension host functions will still be supported in all the future protocols as to not break the existing contracts.
+This CAP does not introduce any backward incompatibilities. The existing TTL
+extension host functions will still be supported in all the future protocols as
+to not break the existing contracts.
### Resource Utilization
-Heavy use of `max_extension` may lead to increase of TTL writes for the protocols that use it, but the overall expected impact should be low. TTL write fees can be increased if necessary in order to encourage lowering the TTL write frequency.
+Heavy use of `max_extension` may lead to an increase in TTL writes for the
+protocols that use it, but the overall expected impact should be low. TTL write
+fees can be increased if necessary in order to encourage lowering the TTL write
+frequency.
## Security Concerns
diff --git a/core/cap-0079.md b/core/cap-0079.md
index fc802ab79..b97d7767d 100644
--- a/core/cap-0079.md
+++ b/core/cap-0079.md
@@ -13,7 +13,8 @@ Protocol version: 26
## Simple Summary
-Introduce host functions for converting Stellar strkey format strings to/from Address/MuxedAddress objects.
+Introduce host functions for converting Stellar strkey format strings to/from
+Address/MuxedAddress objects.
## Working Group
@@ -21,25 +22,41 @@ As specified in the Preamble.
## Motivation
-Stellar protocol provides host functions for converting the account and contract Stellar [SEP-23](../ecosystem/sep-0023.md) strkeys to/from the corresponding `AddressObject`. In protocol 23 a new `MuxedAddressObject` type has been added for supporting muxed account addresses ([CAP-67](./cap-0067.md#muxedaddressobject-host-object)). However, the corresponding strkey conversion functions haven't been added.
-
-The use case for the missing conversions is the same as for the original strkey conversion functions: unlike Stellar smart contract specific `ScVal/ScAddress` XDR types, strkeys provide a portable and user-friendly way of supporting Stellar addresses on other chains. That's preferred way for cross-chain protocols (such as bridges) to communicate with the Stellar chain. Muxed address support is necessary to allow these protocols to easily send tokens to muxed destinations across chains.
+Stellar protocol provides host functions for converting the account and
+contract Stellar [SEP-23](../ecosystem/sep-0023.md) strkeys to/from the
+corresponding `AddressObject`. In protocol 23 a new `MuxedAddressObject` type
+has been added for supporting muxed account addresses
+([CAP-67](./cap-0067.md#muxedaddressobject-host-object)). However, the
+corresponding strkey conversion functions haven't been added.
+
+The use case for the missing conversions is the same as for the original strkey
+conversion functions: unlike Stellar smart contract specific `ScVal/ScAddress`
+XDR types, strkeys provide a portable and user-friendly way of supporting
+Stellar addresses on other chains. That's the preferred way for cross-chain
+protocols (such as bridges) to communicate with the Stellar chain. Muxed
+address support is necessary to allow these protocols to easily send tokens to
+muxed destinations across chains.
### Goals Alignment
This CAP is aligned with the following Stellar Network Goals:
- - The Stellar Network should make it easy for developers of Stellar projects to create highly usable products
+- The Stellar Network should make it easy for developers of Stellar projects to
+ create highly usable products
## Abstract
-New host functions `strkey_to_muxed_address` and `muxed_address_to_strkey` for performing the conversions. These functions are similar to the existing `strkey_to_address`/`address_to_strkey`, but they can have a `MuxedAddressObject` as an output or input respectively.
+New host functions `strkey_to_muxed_address` and `muxed_address_to_strkey` for
+performing the conversions. These functions are similar to the existing
+`strkey_to_address`/`address_to_strkey`, but they can have a
+`MuxedAddressObject` as an output or input respectively.
## Specification
### New host functions
-The diff is based on commit `30ab5d1e2a642f18f3ae94cdb3a2798c3123f049` of `rs-soroban-env`.
+The diff is based on commit `30ab5d1e2a642f18f3ae94cdb3a2798c3123f049` of
+`rs-soroban-env`.
```diff mddiffcheck.ignore=true
diff --git a/soroban-env-common/env.json b/soroban-env-common/env.json
@@ -85,42 +102,60 @@ index 945ada2c..b8d76d4d 100644
#### `muxed_address_to_strkey` host function
-`muxed_address_to_strkey` accepts a single `Val` argument that can be either `AddressObject`, or `MuxedAddressObject`. `AddressObject` will be converted to an account (`G...`) or contract (`C...`) strkey corresponding to the address. `MuxedAddressObject` will be converted to a muxed account strkey (`M...`).
+`muxed_address_to_strkey` accepts a single `Val` argument that can be either
+`AddressObject`, or `MuxedAddressObject`. `AddressObject` will be converted to
+an account (`G...`) or contract (`C...`) strkey corresponding to the address.
+`MuxedAddressObject` will be converted to a muxed account strkey (`M...`).
The function traps if a `Val` of invalid type is provided.
-
#### `strkey_to_muxed_address` host function
-`strkey_to_muxed_address` accepts a single `Val` containing strkey in `BytesObject` or `StringObject`.
+`strkey_to_muxed_address` accepts a single `Val` containing strkey in
+`BytesObject` or `StringObject`.
-Valid `G...` and `C...` strkeys will be converted to corresponding `AddressObject`. Valid `M...` strkeys will be converted to corresponding `MuxedAddressObject`.
+Valid `G...` and `C...` strkeys will be converted to corresponding
+`AddressObject`. Valid `M...` strkeys will be converted to corresponding
+`MuxedAddressObject`.
-The function traps if a `Val` of invalid type is provided, or if the input doesn't contain a valid strkey of one of the 3 types specified above.
+The function traps if a `Val` of invalid type is provided, or if the input
+doesn't contain a valid strkey of one of the 3 types specified above.
## Design Rationale
### New functions vs update to the old functions
-There is a potential possibility of updating the interface of the existing host functions instead of introducing new ones.
+There is a possibility of updating the interface of the existing host
+functions instead of introducing new ones.
-For `strkey_to_address` if `M...` strkey is passed, the contract would fail inside the host function in the older protocols, and fail when trying to interpret the return value as `AddressObject` in the new protocol. Both would result in the contract trapping.
+For `strkey_to_address` if `M...` strkey is passed, the contract would fail
+inside the host function in the older protocols, and fail when trying to
+interpret the return value as `AddressObject` in the new protocol. Both would
+result in the contract trapping.
-For `address_to_strkey` `MuxedAddressObject` generally wouldn't be passed, so the function behavior would remain the same.
+For `address_to_strkey` `MuxedAddressObject` generally wouldn't be passed, so
+the function behavior would remain the same.
-Even though the change seems safe for the base scenarios, there may be more subtle dependencies for the implementation of the existing functions (e.g. if a polymorphic `Val` is expected/used), and thus we err on the side of caution here and leave the semantics of the existing functions as is.
+Even though the change seems safe for the base scenarios, there may be more
+subtle dependencies for the implementation of the existing functions (e.g. if a
+polymorphic `Val` is expected/used), and thus we err on the side of caution
+here and leave the semantics of the existing functions as is.
-The SDKs may just switch to v2 functions unconditionally for the future contracts.
+The SDKs may just switch to v2 functions unconditionally for the future
+contracts.
## Protocol Upgrade Transition
### Backwards Incompatibilities
-This CAP does not introduce any backward incompatibilities. The existing strkey conversion functions will still be supported as is.
+This CAP does not introduce any backward incompatibilities. The existing strkey
+conversion functions will still be supported as is.
### Resource Utilization
-The new host functions will be metered in the same fashion as the existing strkey conversion functions. The metering can rely on the existing cost types and does not require additional benchmarking and calibration.
+The new host functions will be metered in the same fashion as the existing
+strkey conversion functions. The metering can rely on the existing cost types
+and does not require additional benchmarking and calibration.
## Security Concerns
diff --git a/core/cap-0080.md b/core/cap-0080.md
index b4163393b..cbe92b937 100644
--- a/core/cap-0080.md
+++ b/core/cap-0080.md
@@ -15,7 +15,8 @@ Protocol version: 26
## Simple Summary
-This CAP adds host functions for BN254 MSM, BN254 modular arithmetic, and curve membership checks for BLS12-381 and BN254.
+This CAP adds host functions for BN254 MSM, BN254 modular arithmetic, and curve
+membership checks for BLS12-381 and BN254.
## Working Group
@@ -23,26 +24,41 @@ As described in the preamble section.
## Motivation
-Stellar has host support for the BN254 functions available on the EVM (G1 Add, G1 Mul, and Pairing), but there are use cases that require additional functionality.
+Stellar has host support for the BN254 functions available on the EVM (G1 Add,
+G1 Mul, and Pairing), but there are use cases that require additional
+functionality.
-Contracts need modular arithmetic operations on scalar field elements (Fr) and cannot afford to implement these on the guest side. Adding host functions for add, sub, mul, pow, and inv enables these use cases.
+Contracts need modular arithmetic operations on scalar field elements (Fr) and
+cannot afford to implement these on the guest side. Adding host functions for
+add, sub, mul, pow, and inv enables these use cases.
-For use cases that require many G1 additions and scalar multiplications, the cost of repeatedly converting points and scalars between their external encoding and the internal representation can be expensive. Each call to `bn254_g1_add` or `bn254_g1_mul` requires this conversion. With MSM, the conversion happens only once at the beginning, all intermediate operations occur in internal form, and only the final result is converted back. This significantly reduces the overall cost.
-
-Some ZK applications need to verify that a point lies on the curve to validate user input. Adding `is_on_curve` functions for BLS12-381 G1, BLS12-381 G2, and BN254 G1 provides a cheaper way to do this.
+For use cases that require many G1 additions and scalar multiplications, the
+cost of repeatedly converting points and scalars between their external
+encoding and the internal representation can be expensive. Each call to
+`bn254_g1_add` or `bn254_g1_mul` requires this conversion. With MSM, the
+conversion happens only once at the beginning, all intermediate operations
+occur in internal form, and only the final result is converted back. This
+significantly reduces the overall cost.
+Some ZK applications need to verify that a point lies on the curve to validate
+user input. Adding `is_on_curve` functions for BLS12-381 G1, BLS12-381 G2, and
+BN254 G1 provides a cheaper way to do this.
### Goals Alignment
+
This CAP is aligned with the following Stellar Network Goals:
-* The Stellar Network should run at scale and at low cost to all participants of the network.
+- The Stellar Network should run at scale and at low cost to all participants
+ of the network.
## Abstract
+
Nine new host functions are proposed here.
## Specification
### New host functions
+
```
{
"export": "r",
@@ -142,6 +158,7 @@ Nine new host functions are proposed here.
```
### XDR changes
+
```
diff --git a/Stellar-contract-config-setting.x b/Stellar-contract-config-setting.x
index 9a95937da..f1b8a3a78 100644
@@ -156,100 +173,133 @@ index 9a95937da..f1b8a3a78 100644
+ // Cost of performing BN254 G1 multi-scalar multiplication (MSM)
+ Bn254G1Msm = 85
};
-
+
struct ContractCostParamEntry {
```
### Semantics
#### Field and groups
-See [CAP-0074](./cap-0074.md#field-and-groups) for definitions of the BN254 fields and groups. See [CAP-0059](./cap-0059.md#field-and-groups) for definitions of the BLS12-381 fields and groups.
+
+See [CAP-0074](./cap-0074.md#field-and-groups) for definitions of the BN254
+fields and groups. See [CAP-0059](./cap-0059.md#field-and-groups) for
+definitions of the BLS12-381 fields and groups.
#### New host functions introduced
##### `bls12_381_g1_is_on_curve`
-**Description**: checks if a BLS12-381 G1 point is on the curve (does not check subgroup membership).
+**Description**: checks if a BLS12-381 G1 point is on the curve (does not check
+subgroup membership).
+
+**Cost**: includes decoding of the G1 point (`Bls12381DecodeFp`) and the on
+curve check (`Bls12381G1CheckPointOnCurve`).
-**Cost**: includes decoding of the G1 point (`Bls12381DecodeFp`) and the on curve check (`Bls12381G1CheckPointOnCurve`).
+**Error condition**: if the input `BytesObject` does not decode into a valid
+point:
-**Error condition**: if the input `BytesObject` does not decode into a valid point:
- Bytes length is not equal to 96
- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
-**Return value**: returns `true` if the point is on the curve, `false` otherwise.
+**Return value**: returns `true` if the point is on the curve, `false`
+otherwise.
##### `bls12_381_g2_is_on_curve`
-**Description**: checks if a BLS12-381 G2 point is on the curve (does not check subgroup membership).
+**Description**: checks if a BLS12-381 G2 point is on the curve (does not check
+subgroup membership).
-**Cost**: includes decoding of the G2 point (`Bls12381DecodeFp`) and the on curve check (`Bls12381G2CheckPointOnCurve`).
+**Cost**: includes decoding of the G2 point (`Bls12381DecodeFp`) and the on
+curve check (`Bls12381G2CheckPointOnCurve`).
+
+**Error condition**: if the input `BytesObject` does not decode into a valid
+point:
-**Error condition**: if the input `BytesObject` does not decode into a valid point:
- Bytes length is not equal to 192
- The compression flag (the most significant bit) is set.
-- The infinity flag (the second most significant bit) is set, but the remaining bits are *not* all zero.
+- The infinity flag (the second most significant bit) is set, but the remaining
+ bits are _not_ all zero.
- The sort flag (the third most significant bit) is set.
-**Return value**: returns `true` if the point is on the curve, `false` otherwise.
+**Return value**: returns `true` if the point is on the curve, `false`
+otherwise.
##### `bn254_g1_is_on_curve`
**Description**: checks if a BN254 G1 point is on the curve.
-**Cost**: includes decoding of the G1 point (`Bn254DecodeFp`) and the on curve check (`Bn254G1CheckPointOnCurve`).
+**Cost**: includes decoding of the G1 point (`Bn254DecodeFp`) and the on curve
+check (`Bn254G1CheckPointOnCurve`).
+
+**Error condition**: if the input `BytesObject` does not decode into a valid
+point:
-**Error condition**: if the input `BytesObject` does not decode into a valid point:
- Bytes length is not equal to 64
- The point is compressed
-**Return value**: returns `true` if the point is on the curve, `false` otherwise.
+**Return value**: returns `true` if the point is on the curve, `false`
+otherwise.
##### `bn254_g1_msm`
**Description**: perform multi-scalar-multiplication (MSM) in G1.
-**Cost**: includes decoding of the G1 vector (`Bn254DecodeFp`), converting `fr` from `U256` (`Bn254FrFromU256`), the MSM operation `Bn254G1Msm`, converting the point from projective to affine (`Bn254G1ProjectiveToAffine`), and encoding of the resulting G1 point (`Bn254EncodeFp`).
+**Cost**: includes decoding of the G1 vector (`Bn254DecodeFp`), converting `fr`
+from `U256` (`Bn254FrFromU256`), the MSM operation `Bn254G1Msm`, converting the
+point from projective to affine (`Bn254G1ProjectiveToAffine`), and encoding of
+the resulting G1 point (`Bn254EncodeFp`).
-**Error condition**:
-1. if the two vectors have different lengths
+**Error condition**:
+
+1. if the two vectors have different lengths
2. if the length of either vector is zero
-3. if any point in the G1 points vector does not decode into a valid G1 point or does not conform to the specified encoding standard:
+3. if any point in the G1 points vector does not decode into a valid G1 point
+ or does not conform to the specified encoding standard:
+
- Bytes length is not equal to 64
- The point is compressed
- The input point does not belong on the G1 curve
##### `bn254_fr_add`
-**Description**: performs addition `(lhs + rhs) mod r` between two BN254 scalar elements (Fr).
+**Description**: performs addition `(lhs + rhs) mod r` between two BN254 scalar
+elements (Fr).
-**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar addition `Bn254FrAddSub`, and conversion back to U256 (`Bn254FrToU256`).
+**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar addition
+`Bn254FrAddSub`, and conversion back to U256 (`Bn254FrToU256`).
**Error condition**: None
##### `bn254_fr_sub`
-**Description**: performs subtraction `(lhs - rhs) mod r` between two BN254 scalar elements (Fr).
+**Description**: performs subtraction `(lhs - rhs) mod r` between two BN254
+scalar elements (Fr).
-**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar subtraction `Bn254FrAddSub`, and conversion back to U256 (`Bn254FrToU256`).
+**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar subtraction
+`Bn254FrAddSub`, and conversion back to U256 (`Bn254FrToU256`).
**Error condition**: None
##### `bn254_fr_mul`
-**Description**: performs multiplication `(lhs * rhs) mod r` between two BN254 scalar elements (Fr).
+**Description**: performs multiplication `(lhs * rhs) mod r` between two BN254
+scalar elements (Fr).
-**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar multiplication `Bn254FrMul`, and conversion back to U256 (`Bn254FrToU256`).
+**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar multiplication
+`Bn254FrMul`, and conversion back to U256 (`Bn254FrToU256`).
**Error condition**: None
##### `bn254_fr_pow`
-**Description**: performs exponentiation `lhs.exp(rhs) mod r` between a BN254 scalar element (Fr) and a u64 exponent.
+**Description**: performs exponentiation `lhs.exp(rhs) mod r` between a BN254
+scalar element (Fr) and a u64 exponent.
-**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar exponentiation `Bn254FrPow`, and conversion back to U256 (`Bn254FrToU256`).
+**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar exponentiation
+`Bn254FrPow`, and conversion back to U256 (`Bn254FrToU256`).
**Error condition**: None
@@ -257,38 +307,55 @@ See [CAP-0074](./cap-0074.md#field-and-groups) for definitions of the BN254 fiel
**Description**: performs inversion of a BN254 scalar element (Fr).
-**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar inversion `Bn254FrInv`, and conversion back to U256 (`Bn254FrToU256`).
+**Cost**: conversion of fr from U256 (`Bn254FrFromU256`), scalar inversion
+`Bn254FrInv`, and conversion back to U256 (`Bn254FrToU256`).
**Error condition**: if the provided input `fr` is zero.
#### New metering `CostType` introduced
-- `Bn254G1Msm` - Cost of performing BN254 G1 multi-scalar multiplication (MSM). Type: linear w.r.t the length of the input vectors.
+- `Bn254G1Msm` - Cost of performing BN254 G1 multi-scalar multiplication (MSM).
+ Type: linear w.r.t the length of the input vectors.
## Design Rationale
-### Adding BN254 specific host functions instead of more general ones.
+### Adding BN254 specific host functions instead of more general ones.
-The BN254 specific modular arithmetic functions (add, sub, mul, pow, inv) were chosen because they map directly to operations provided by the Arkworks library, which is used in the host implementation. This direct mapping simplifies metering since each host function corresponds to a well-defined Arkworks operation with predictable performance characteristics.
+The BN254 specific modular arithmetic functions (add, sub, mul, pow, inv) were
+chosen because they map directly to operations provided by the Arkworks
+library, which is used in the host implementation. This direct mapping
+simplifies metering since each host function corresponds to a well-defined
+Arkworks operation with predictable performance characteristics.
-The alternative would be to add host functions that take the modulus as an input, but the metering and implementation would be more complex. We also already have BLS12-381 specific arithmetic functions, so the BN254 ones bring the two closer to feature parity.
+The alternative would be to add host functions that take the modulus as an
+input, but the metering and implementation would be more complex. We also
+already have BLS12-381 specific arithmetic functions, so the BN254 ones bring
+the two closer to feature parity.
-If we think the general functions will be helpful in the future, we can add those in later.
+If we think the general functions will be helpful in the future, we can add
+those in later.
## Protocol Upgrade Transition
+
The proposed host functions will become available in protocol 26.
### Backwards Incompatibilities
+
This CAP does not introduce any backward incompatibilities.
### Resource Utilization
-The only new cost type is for BN254 G1 MSM, which we will calibrate. The `is_on_curve` functions reuse existing cost types.
+
+The only new cost type is for BN254 G1 MSM, which we will calibrate. The
+`is_on_curve` functions reuse existing cost types.
## Security Concerns
+
- Proper metering to avoid a Denial of Service.
## Test Cases
+
TODO
## Implementation
-TODO
\ No newline at end of file
+
+TODO
diff --git a/ecosystem/README.md b/ecosystem/README.md
index ac7546512..15c6095c0 100644
--- a/ecosystem/README.md
+++ b/ecosystem/README.md
@@ -1,7 +1,7 @@
# Stellar Ecosystem Proposals (SEPs)
SEPs are ideas, standards, and specifications in the form of proposals that the
-author is intending to be adopted by participants in the Stellar ecosystem.
+author would like to see adopted by participants in the Stellar ecosystem.
## Roles
@@ -12,7 +12,7 @@ All SEPs have individuals fulfilling the following roles:
and the general success of the SEP.
- **Maintainer** - The maintainer is optional. If not present, the maintainer
is the author. The maintainer is responsible for reviewing changes to the
- SEP. For SEPs that have ecosystem adoption, SDF may identify or become a
+ SEP. For SEPs that have ecosystem adoption, the SDF may identify or become a
maintainer of last resort. A maintainer of last resort steps in and acts as
the maintainer if the maintainer ceases to respond or engage.
@@ -21,18 +21,18 @@ All SEPs have individuals fulfilling the following roles:
- **Draft** - A SEP that is currently open for consideration, iteration and
actively being discussed. It may change.
- **FCP** - A SEP that has entered a Final Comment Period (FCP). An author
- places their SEP in FCP when they wish to signal that they plan to cease
- making changes. After at least one week has passed the SEP's status should
- move to `Active` or `Final`, or back to `Draft`. If changes are required, it
- should be moved back to `Draft`.
+ places their SEP in FCP to signal that they plan to cease making changes. The
+ author also decides whether they want a SEP considered for `Active` or
+ `Final` status. After at least one week passes for review and discussion, the
+ SEP's status either moves as elected or back to `Draft`, if it needs changes.
- **Active** - A SEP ready to be adopted, and the proposal is a living document
- and may still receive changes. The author intends the SEP in its current form
- to be actively adopted. Changes can be made without changing the SEP number,
- although in the interest of growing an ecosystem of interopable participants
- the author should endeavor to make changes backwards compatible so that
- participants who have already adopted the SEP can continue to participate.
- Where changes cannot be backwards compatible, the major version should be
- updated to clearly distinguish new incompatible versions.
+ which may still receive changes. The author intends the SEP in its current
+ form to be actively adopted. Changes can be made without changing the SEP
+ number, although in the interest of growing an ecosystem of interoperable
+ participants the author should endeavor to make changes backwards compatible
+ so that participants who have already adopted the SEP can continue to
+ participate. Where changes cannot be backwards compatible, the major version
+ should be updated to clearly distinguish new incompatible versions.
- **Final** - A SEP ready to be adopted, and the proposal is an immutable
document and will no longer receive changes, other than minor errata. The
author intends to make no further changes. Adopters can expect significant
@@ -40,9 +40,9 @@ All SEPs have individuals fulfilling the following roles:
### Additional Statuses
-- **Abandoned** - A SEP has been abandoned by the author. SDF may move a SEP
- into this state if the SEP has no activity, no visible adoption, and the
- author is not responsive.
+- **Abandoned** - A SEP has been abandoned by the author. The SDF may approve
+ moving a SEP into this state if the SEP has no activity, no visible adoption,
+ and the author is not responsive.
## Proposals
@@ -112,18 +112,17 @@ All SEPs have individuals fulfilling the following roles:
# Contribution Process
-The Stellar Ecosystem, like most software ecosystems in the world, continues to
+The Stellar ecosystem, like most software ecosystems in the world, continues to
evolve over time to meet the needs of our network's participants and to drive
technology forward into new territory.
-Unlike Stellar's Core development (CAPs), Stellar's Ecosystem Proposals are
-intended to be a more dynamic way of introducing standards and protocols
-utilized in the ecosystem that are built on top of the Stellar Network. It uses
-a lightweight process.
+Unlike Stellar's Core development (CAPs), Stellar's Ecosystem Proposals are a
+more dynamic way of introducing standards and protocols used in the ecosystem
+and built on top of the network. Thus it uses a lightweight, pragmatic process.
-A SEPs author is responsible for a proposals adoption. Other ecosystem
-participants, including SDF, may encourage adoption of a proposal, but authors
-should expect each proposal to stand on its own merits and authors and
+A SEP's author is responsible for a proposal's adoption. Other ecosystem
+participants, including the SDF, may encourage adoption of a proposal, but
+authors should expect each proposal to stand on its own merits. Authors and
maintainers should plan to drive adoption themselves.
Before contributing, consider the following:
@@ -132,7 +131,7 @@ Before contributing, consider the following:
Dev Discord], or [stellar-dev mailing list], and utilize it to begin a draft
proposal.
- Follow the proposal process listed below. If you're having difficulty moving
- the proposal forward, talk to folks in the ecosystem, or folks at SDF;
+ the proposal forward, talk to folks in the ecosystem, or folks at the SDF;
they'll often have guidance on how to move things forward, as well as
feedback regarding feasibility and how the proposal does or does not align
with the Stellar Network's goals.
@@ -141,8 +140,9 @@ Before contributing, consider the following:
### Pre-SEP (Initial Discussion)
-Introduce your idea on the [GitHub discussion forum], [Stellar Dev Discord], or
-[stellar-dev mailing list] and other community forums dedicated to Stellar.
+Introduce your idea on the [GitHub discussion forum], [Stellar Dev Discord],
+[stellar-dev mailing list], or any other community conversation spaces and
+boards dedicated to Stellar.
- Make sure to gather feedback and alternative ideas — it's useful before
putting together a formal draft!
@@ -180,16 +180,17 @@ following:
- If your SEP requires images or other supporting files, they should be
included in a subdirectory of the `contents` folder for that SEP, such as
`contents/sep_happycoder_b274f73c/`. Links should be relative, for example a
- link to an image from SEP-X would be
+ link to an image from `SEP-X` would be
`../contents/sep_happycoder_b274f73c/image.png`.
### Draft: Further Iteration
-From there, the following process will happen:
+Next, the SDF recommends an iterative-feedback process with the community:
-- You should continue the discussion of the draft SEP on the [GitHub discussion
- forum], [Stellar Dev Discord], or [stellar-dev mailing list] to gather
- additional feedback. We welcome any additional PRs that iterate on the draft.
+- Successful authors polish their designs with help from SEP users through a
+ discussion of the draft SEP on the [GitHub discussion forum], [Stellar Dev
+ Discord], or [stellar-dev mailing list] to gather additional feedback. The
+ SDF also welcomes any additional ecosystem PRs that iterate on the draft.
- Keep the version of the SEP as a `v0.y.z` version while in draft.
- Increment the minor or patch versions on each change while in draft. See [SEP
Versioning].
diff --git a/ecosystem/sep-0023.md b/ecosystem/sep-0023.md
index 8bbca61ec..0e5cdd513 100644
--- a/ecosystem/sep-0023.md
+++ b/ecosystem/sep-0023.md
@@ -125,7 +125,6 @@ security problems.
### Valid test cases
1. Valid non-multiplexed account
-
- Strkey `GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ`
- type: `KEY_TYPE_ED25519`
- Binary `MuxedAccount`:
@@ -141,7 +140,6 @@ security problems.
```
1. Valid multiplexed account
-
- Strkey:
`MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ`
- type: `KEY_TYPE_MUXED_ED25519`
@@ -162,7 +160,6 @@ security problems.
1. Valid multiplexed account in which unsigned id exceeds maximum signed 64-bit
integer
-
- Strkey:
`MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK`
- type: `KEY_TYPE_MUXED_ED25519`
@@ -182,7 +179,6 @@ security problems.
```
1. Valid signed payload with an ed25519 public key and a 32-byte payload.
-
- Strkey:
`PA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAQACAQDAQCQMBYIBEFAWDANBYHRAEISCMKBKFQXDAMRUGY4DUPB6IBZGM`
- type: `KEY_TYPE_ED25519_SIGNED_PAYLOAD`
@@ -192,7 +188,6 @@ security problems.
1. Valid signed payload with an ed25519 public key and a 29-byte payload which
becomes zero padded.
-
- Strkey:
`PA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAOQCAQDAQCQMBYIBEFAWDANBYHRAEISCMKBKFQXDAMRUGY4DUAAAAFGBU`
- type: `KEY_TYPE_ED25519_SIGNED_PAYLOAD`
@@ -200,7 +195,6 @@ security problems.
- payload: 0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d (hex)
1. Valid contract
-
- Strkey `CA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUWDA`
- type: `HASH_TYPE_SHA256`
- hash:
@@ -215,7 +209,6 @@ security problems.
```
1. Valid liquidity pool address
-
- Strkey `LA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUPJN`
- type: `HASH_TYPE_SHA256`
- hash:
@@ -230,7 +223,6 @@ security problems.
```
1. Valid claimable balance address
-
- Strkey `BAAD6DBUX6J22DMZOHIEZTEQ64CVCHEDRKWZONFEUL5Q26QD7R76RGR4TU`
- type: `HASH_TYPE_SHA256`
- claimable balance type: `v0`
@@ -248,74 +240,59 @@ security problems.
### Invalid test cases
1. Invalid length (Ed25519 should be 32 bytes, not 5)
-
- Strkey: `GAAAAAAAACGC6`
1. The unused trailing bit must be zero in the encoding of the last three bytes
(24 bits) as five base-32 symbols (25 bits)
-
- Strkey:
`MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUR`
1. Invalid length (congruent to 1 mod 8)
-
- Strkey: `GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZA`
1. Invalid length (base-32 decoding should yield 35 bytes, not 36)
-
- Strkey: `GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUACUSI`
1. Invalid algorithm (low 3 bits of version byte are 7)
-
- Strkey: `G47QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVP2I`
1. Invalid length (congruent to 6 mod 8)
-
- Strkey:
`MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLKA`
1. Invalid length (base-32 decoding should yield 43 bytes, not 44)
-
- Strkey:
`MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAAV75I`
1. Invalid algorithm (low 3 bits of version byte are 7)
-
- Strkey:
`M47QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ`
1. Padding bytes are not allowed
-
- Strkey:
`MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUK===`
1. Invalid checksum
-
- Strkey:
`MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUO`
1. Length prefix specifies length that is shorter than payload in signed
payload
-
- Strkey:
`PA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAQACAQDAQCQMBYIBEFAWDANBYHRAEISCMKBKFQXDAMRUGY4DUPB6IAAAAAAAAPM`
1. Length prefix specifies length that is longer than payload in signed payload
-
- Strkey:
`PA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAOQCAQDAQCQMBYIBEFAWDANBYHRAEISCMKBKFQXDAMRUGY4Z2PQ`
1. No zero padding in signed payload
-
- Strkey:
`PA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAOQCAQDAQCQMBYIBEFAWDANBYHRAEISCMKBKFQXDAMRUGY4DXFH6`
1. The unused trailing 2-bits must be zero in the encoding of the last symbol.
-
- Strkey: `BAAD6DBUX6J22DMZOHIEZTEQ64CVCHEDRKWZONFEUL5Q26QD7R76RGR4TV`
1. Invalid claimable balance type (first byte of binary key is not 0)
-
- Strkey: `BAAT6DBUX6J22DMZOHIEZTEQ64CVCHEDRKWZONFEUL5Q26QD7R76RGXACA`
You can paste these invalid strkeys more conveniently into a unit test using
diff --git a/limits/README.md b/limits/README.md
index de67f4429..8ce1cdd20 100644
--- a/limits/README.md
+++ b/limits/README.md
@@ -2,51 +2,105 @@
## Summary
-This document list the considerations that go into smart contract resource limit updates (mostly increases) and introduces a proposal process for performing the resource limit updates, similar to the CAP/SEP processes.
+This document lists the considerations that go into smart contract resource
+limit updates (mostly increases) and introduces a proposal process for
+performing the resource limit updates, similar to the CAP/SEP processes.
## Limits Overview
-Smart contract execution on Stellar is guarded by various resource limits that ensure that:
+Smart contract execution on Stellar is guarded by various resource limits that
+ensure that:
- a transaction doesn't consume more resources than it requested
-- a transaction can't request more resources than allowed by the protocol (per-transaction resource limit)
-- cumulative resources in a given ledger don't exceed the value defined by the protocol (ledger-wide resource limit)
+- a transaction can't request more resources than allowed by the protocol
+ (per-transaction resource limit)
+- cumulative resources in a given ledger don't exceed the value defined by the
+ protocol (ledger-wide resource limit)
-The resource and limit semantics are specified in detail in [CAP-46-07](../core/cap-0046-07.md).
+The resource and limit semantics are specified in detail in
+[CAP-46-07](../core/cap-0046-07.md).
## Reducing the limits
-While the focus of this document is limit increases, it’s important to mention when and why the limits can be decreased.
-
-The reason for the decrease should be network-wide emergency (e.g. significant ledger close time slow-down or nodes getting out of sync). While ideally we’d like to avoid that and do benchmarking and testing, there is always a chance that the behavior in the real network differs from any test performed.
-
-The ledger-wide limits technically can be reduced down to at least the respective per-transaction limit, but since the main reason for decrease is likely to be reverting an overly optimistic increase, these would mostly just be rollbacks. Another prominent reason for decreasing the ledger-wide limits is the potential for the ledger close latency decrease, i.e. if the network will close more ledgers per unit of time, but every ledger includes less transactions than before. In either scenario the proposal is likely to be exempt from the process described in this document.
-
-The per-transaction limits should normally not be decreased at all, making ***per-transaction limit increases basically irreversible***. As a consequence, ***per-transaction limit increases must be done fairly conservatively and thoughtfully***. The reason for this is that after the per-transaction limit is increased, anyone can start sending larger transactions to the network and build protocols that rely on the increased limits. Thus reducing the limit will likely break a number of protocols, which is something that the network should try to avoid at all costs.
-
-Reduction of the per-transaction limit should be viewed as the ‘last resort’ measure in case if it causes serious network issues that can’t be quickly resolved.
+While the focus of this document is limit increases, it’s important to mention
+when and why the limits can be decreased.
+
+The reason for the decrease should be a network-wide emergency (e.g. significant
+ledger close time slow-down or nodes getting out of sync). While ideally we’d
+like to avoid that and do benchmarking and testing, there is always a chance
+that the behavior in the real network differs from any test performed.
+
+The ledger-wide limits technically can be reduced down to at least the
+respective per-transaction limit, but since the main reason for decrease is
+likely to be reverting an overly optimistic increase, these would mostly just
+be rollbacks. Another prominent reason for decreasing the ledger-wide limits is
+the potential for the ledger close latency decrease, i.e. if the network will
+close more ledgers per unit of time, but every ledger includes fewer
+transactions than before. In either scenario the proposal is likely to be
+exempt from the process described in this document.
+
+The per-transaction limits should normally not be decreased at all, making
+**_per-transaction limit increases basically irreversible_**. As a consequence,
+**_per-transaction limit increases must be done fairly conservatively and
+thoughtfully_**. The reason for this is that after the per-transaction limit is
+increased, anyone can start sending larger transactions to the network and
+build protocols that rely on the increased limits. Thus reducing the limit will
+likely break a number of protocols, which is something that the network should
+try to avoid at all costs.
+
+Reduction of the per-transaction limit should be viewed as the ‘last resort’
+measure in case it causes serious network issues that can’t be quickly
+resolved.
## Key considerations for increasing the limits
### Why increase the limits?
-The increase in resources should generally be motivated by demand. However, the notion of demand differs for ledger-wide and per-transaction limits. Note, that ‘demand’ doesn’t mean that a limit will be increased, it’s just a motivation to consider the increase at all.
-
-- For ledger-wide limits, *the demand can be identified by sufficiently high surge pricing rate due to transactions not fitting into a certain limit*, or at least a significant percentage of ledgers being near the resource limit
- - This is a reactive approach. There is a possibility that Core team might need to take a proactive approach and increase the limits in preparation for launching a protocol that is expected to have high TPS (i.e. not in response to the existing network activity). This should rather be an exception though
- - When the per-transaction limits are increased, it might be necessary to also increase the respective ledger-wide limits in order to maintain a high enough ratio between the limits
-- For per-transaction limits, the demand is based on the needs of the protocols that run on Soroban.
- - It’s not realistic to support each and every possible protocol, but in general Soroban should provide the capabilities for implementing the majority of protocols that exist on the other blockchains, with as little exceptions as possible
- - More specifically, we should first and foremost looks for the protocols that align with the Stellar chain goals like any other permanent protocol changes (see CAP process)
- - Contract developers have some room for optimization. That said, it would be preferential to have optimization just impact the costs and not to be a hard requirement for doing more complex things on Soroban
+The increase in resources should generally be motivated by demand. However, the
+notion of demand differs for ledger-wide and per-transaction limits. Note that
+‘demand’ doesn’t mean that a limit will be increased, it’s just a motivation to
+consider the increase at all.
+
+- For ledger-wide limits, _the demand can be identified by sufficiently high
+ surge pricing rate due to transactions not fitting into a certain limit_, or
+ at least a significant percentage of ledgers being near the resource limit
+  - This is a reactive approach. There is a possibility that the Core team might
+ need to take a proactive approach and increase the limits in preparation
+ for launching a protocol that is expected to have high TPS (i.e. not in
+ response to the existing network activity). This should rather be an
+ exception though
+ - When the per-transaction limits are increased, it might be necessary to
+ also increase the respective ledger-wide limits in order to maintain a high
+ enough ratio between the limits
+- For per-transaction limits, the demand is based on the needs of the protocols
+ that run on Soroban.
+ - It’s not realistic to support each and every possible protocol, but in
+ general Soroban should provide the capabilities for implementing the
+    majority of protocols that exist on the other blockchains, with as few
+    exceptions as possible
+  - More specifically, we should first and foremost look for the protocols
+ that align with the Stellar chain goals like any other permanent protocol
+ changes (see CAP process)
+ - Contract developers have some room for optimization. That said, it would be
+ preferential to have optimization just impact the costs and not to be a
+ hard requirement for doing more complex things on Soroban
### Limit upper bounds
-When considering the increase of any given limit, we need to make sure it doesn’t exceed some upper bound. In order to determine that upper bound we need to consider both short and long term impact on the network health, protocol development, and downstream systems.
+When considering the increase of any given limit, we need to make sure it
+doesn’t exceed some upper bound. In order to determine that upper bound we need
+to consider both short and long term impact on the network health, protocol
+development, and downstream systems.
-The nature of the impact is significantly different for ledger-wide and per-transaction limits.
+The nature of the impact is significantly different for ledger-wide and
+per-transaction limits.
-Increasing ledger-wide limits has an immediate impact on the network health and downstream systems, but these are easy to reduce in case of emergency. Increasing per-transaction limits has almost no immediate impact on anything, as the overall amount of work done doesn’t change. However, it is almost impossible to reduce the per-transaction limits, so increasing these might have a potential long term impact on the future changes to the protocol.
+Increasing ledger-wide limits has an immediate impact on the network health and
+downstream systems, but these are easy to reduce in case of emergency.
+Increasing per-transaction limits has almost no immediate impact on anything,
+as the overall amount of work done doesn’t change. However, it is almost
+impossible to reduce the per-transaction limits, so increasing these might have
+a potential long term impact on the future changes to the protocol.
Below is the per-resource breakdown of the potential impact factors.
@@ -54,47 +108,62 @@ Below is the per-resource breakdown of the potential impact factors.
Ledger-wide
-- (Immediate) Ledger close time increase
- - The ‘hard’ upper bound is limited by how much time we can dedicate applying the transactions and the minimum hardware requirements for a validator (mostly CPU-driven for the instructions)
+- (Immediate) Ledger close time increase
+ - The ‘hard’ upper bound is limited by how much time we can dedicate applying
+ the transactions and the minimum hardware requirements for a validator
+ (mostly CPU-driven for the instructions)
- (Long-term, minor) Catchup time increase
Per-transaction
-- (Long-term) Long-running transactions might hinder the development of more efficient scheduling algorithms.
- - For example, large transactions might be problematic if we want to introduce synchronization steps during parallel transaction application.
- - Another example would be reducing the interval between ledgers \- while it is possible to achieve higher overall TPS by applying less transactions more frequently, long running transactions may result in too high lower bound for the apply time
+- (Long-term) Long-running transactions might hinder the development of more
+ efficient scheduling algorithms.
+ - For example, large transactions might be problematic if we want to
+ introduce synchronization steps during parallel transaction application.
+ - Another example would be reducing the interval between ledgers \- while it
+    is possible to achieve higher overall TPS by applying fewer transactions
+ more frequently, long running transactions may result in too high lower
+ bound for the apply time
**Read entries**
Ledger-wide
-- (Immediate) Ledger close time increase
- - This shares the ledger apply time upper bound with the instructions, but is mostly determined by IOPS
-
+- (Immediate) Ledger close time increase
+ - This shares the ledger apply time upper bound with the instructions, but is
+ mostly determined by IOPS
**Read KB**
Ledger-wide
-- (Immediate, minor) Ledger close time increase
- - The intuition is that more random reads are more expensive than a single read of a bigger entry, i.e. ‘read entries’ limit has more impact than the overall data size (given a reasonably small ledger entry size limit)
+
+- (Immediate, minor) Ledger close time increase
+ - The intuition is that more random reads are more expensive than a single
+ read of a bigger entry, i.e. ‘read entries’ limit has more impact than the
+ overall data size (given a reasonably small ledger entry size limit)
**Write entries**
Ledger-wide
+
- (Immediate, minor) Ledger close time increase
**Write KB**
Ledger-wide
-- (Immediate, minor) Ledger close time increase
-- (Immediate & long term, downstream) Ledger close meta size increase
-- (Short to long term) Increased speed of the ledger growth
- - This is bounded by the archival/temp entry eviction rate \- Core should be able to evict the data faster than it can be written
+
+- (Immediate, minor) Ledger close time increase
+- (Immediate & long term, downstream) Ledger close meta size increase
+- (Short to long term) Increased speed of the ledger growth
+ - This is bounded by the archival/temp entry eviction rate \- Core should be
+ able to evict the data faster than it can be written
Per-transaction
-- (Immediate & long term, downstream) Increase in per-transaction meta size \- might be problematic for indexing
+
+- (Immediate & long term, downstream) Increase in per-transaction meta size \-
+ might be problematic for indexing
**Tx size KB**
Ledger-wide
-- (Immediate) Network bandwidth for flooding the transactions between the nodes
-- (Immediate & long term, downstream) Ledger close meta size increase
+- (Immediate) Network bandwidth for flooding the transactions between the nodes
+- (Immediate & long term, downstream) Ledger close meta size increase
- (Long term) History size increase
Per-transaction
@@ -102,103 +171,198 @@ Per-transaction
- (Immediate) Potential issues with flooding larger transactions
**Events size**
-There is currently no ledger-wide limit for the total size of events emitted, however the maximum total size of events per ledger is per-transaction limit times the number of transactions.
+There is currently no ledger-wide limit for the total size of events emitted,
+however the maximum total size of events per ledger is per-transaction limit
+times the number of transactions.
+
- (Immediate & long term, downstream) Ledger close meta size increase
#### Ledger-wide impact summary
-As a quick summary of increasing most of the ledger-wide limits we can ‘transpose’ the per-resource paragraphs above and indicate the dependencies:
+As a quick summary of increasing most of the ledger-wide limits we can
+‘transpose’ the per-resource paragraphs above and indicate the dependencies:
**Total ledger apply time**
-`ledger_apply_time = io_time + tx_apply_time`, where
+`ledger_apply_time = io_time + tx_apply_time`, where
`io_time = read_time(ledger_read_entries, ledger_read_bytes) + write_time(ledger_write_entries, ledger_write_bytes) + meta_emit_time`, and
`tx_apply_time = tx_execution_time(ledger_instructions)`
-`read_time`, `write_time` and `execution_time` functions are proportional to the respective ledger-wide limits.
-`meta_emit_time` has no ledger-wide limit and is proportional to the metadata size per ledger (see below).
+`read_time`,
+`write_time` and `execution_time` functions are proportional to the respective
+ledger-wide limits.
+`meta_emit_time` has no ledger-wide limit and is proportional to the metadata
+size per ledger (see below).
**Total size of metadata per ledger**
`metadata_size = ledger_txs_size + entry_sizes_before_write + ledger_write_bytes + tx_events_size_bytes * num_txs`
-`entry_sizes_before_write` is technically limited by `ledger_read_bytes`, but that’s rather an upper bound and a more realistic estimate for this is `ledger_write_bytes`.
+`entry_sizes_before_write` is technically limited by `ledger_read_bytes`, but
+that’s rather an upper bound and a more realistic estimate for this is
+`ledger_write_bytes`.
**Total history size per ledger**
-`history_size = ledger_txs_size + result_size * num_txs`, where `result_size` is a small constant value
+`history_size = ledger_txs_size + result_size * num_txs`, where `result_size`
+is a small constant value
### Transactions per ledger ratio
-Besides the resource-specific direct impact of any limit increase, there is always a factor of the transaction prioritization and transaction set building.
-
-Core prioritizes Soroban transactions using only the flat inclusion fee that is completely independent of the resources that the transaction demands. This approach makes the fee model simpler and keeps the fees reasonably low for more complex protocols. The downside is that transaction sets that the Core builds are not optimized for the maximum throughput TPS- and fee-wise (for example, Core might build a transaction set with 2 large transactions with 1001 stroops fee instead of a transaction set with 20 small transactions with 1000 stroops fee each). This issue is mostly alleviated by the high enough ratio between the ledger limit and transaction limit for every resource.
-
-Basically, every per-transaction limit increase results in a decrease of the ratio with the ledger-wide limit. Thus most of the time we’ll need to consider increasing the ledger-wide limit together with increasing the per-transaction limit, even if there is no immediate need for that. For example, if the current ratio is 10 and the increase of per-transaction limit moves it down to 7 (that is still above the “target” of 5), we might be fine with changing only the transaction limit.
-For reference, here are the current (‘phase 5’) ratios between the per-transaction and ledger-wide limits:
-
-| Resource | Per-ledger limit | Per-tx limit | Ratio (ledger/tx) |
-| :---- | :---- | :---- | :---- |
-| Instructions | 500M | 100M | 5 |
-| Read entries | 200 | 40 | 5 |
-| Write entries | 125 | 25 | 5 |
-| Read KB | 500 KB | 200 KB | 2.5 |
-| Write KB | 143 KB | 132 KB | 1.08 |
-| Tx size KB | 133 KB | 132 KB | 1.007 |
-
-The write KB and transaction size ratios are currently notably low due to technical limitations; this should be addressed eventually.
+Besides the resource-specific direct impact of any limit increase, there is
+always a factor of the transaction prioritization and transaction set building.
+
+Core prioritizes Soroban transactions using only the flat inclusion fee that is
+completely independent of the resources that the transaction demands. This
+approach makes the fee model simpler and keeps the fees reasonably low for more
+complex protocols. The downside is that transaction sets that the Core builds
+are not optimized for the maximum throughput TPS- and fee-wise (for example,
+Core might build a transaction set with 2 large transactions with 1001 stroops
+fee instead of a transaction set with 20 small transactions with 1000 stroops
+fee each). This issue is mostly alleviated by the high enough ratio between the
+ledger limit and transaction limit for every resource.
+
+Basically, every per-transaction limit increase results in a decrease of the
+ratio with the ledger-wide limit. Thus most of the time we’ll need to consider
+increasing the ledger-wide limit together with increasing the per-transaction
+limit, even if there is no immediate need for that. For example, if the current
+ratio is 10 and the increase of per-transaction limit moves it down to 7 (that
+is still above the “target” of 5), we might be fine with changing only the
+transaction limit.
+For reference, here are the current (‘phase 5’) ratios between the
+per-transaction and ledger-wide limits:
+
+| Resource | Per-ledger limit | Per-tx limit | Ratio (ledger/tx) |
+| :------------ | :--------------- | :----------- | :---------------- |
+| Instructions | 500M | 100M | 5 |
+| Read entries | 200 | 40 | 5 |
+| Write entries | 125 | 25 | 5 |
+| Read KB | 500 KB | 200 KB | 2.5 |
+| Write KB | 143 KB | 132 KB | 1.08 |
+| Tx size KB | 133 KB | 132 KB | 1.007 |
+
+The write KB and transaction size ratios are currently notably low due to
+technical limitations; this should be addressed eventually.
## Process for increasing the limits
-*Note: initially, most of the process will be handled by the Core development team, as the tools and methodologies are still under development. Eventually the benchmarking tooling might become mature enough to allow for mostly 'self-service' proposals.*
+_Note: initially, most of the process will be handled by the Core development
+team, as the tools and methodologies are still under development. Eventually
+the benchmarking tooling might become mature enough to allow for mostly
+'self-service' proposals._
-*Note: the thresholds here are not final and are subject to change.*
+_Note: the thresholds here are not final and are subject to change._
-Based on the key considerations above, here is a sketch for the limit update process that will be documented in a dedicated document: Stellar Limits Proposal (SLP). The exact template is TBD.
+Based on the key considerations above, here is a sketch for the limit update
+process that will be documented in a dedicated document: Stellar Limits
+Proposal (SLP). The exact template is TBD.
**Step 1 \- Identify the resources in demand**
There are a few options for this:
-- Based on the ecosystem requests (may affect both per-transaction and ledger-wide limits)
- - Ecosystem requests should demonstrate the necessity of the increase, specifically:
- - How the protocol aligns with Stellar goals (similar to CAP alignment section)
- - Why the existing limits are not sufficient (e.g. protocol requirements upper bound is clearly above the existing limit)
-- For ledger-wide limits, based on the observed ledger utilization
- - E.g. The resource is at 90+% of capacity for 10+% of the ledgers
- - While it’s intentional for some ledgers to be surge priced (especially in case of spam/arbitrage activity), it’s important to analyze the network activity and scale up in case of actual organic growth (or to at least allow non-spammy traffic to be applied as well)
-- For per-transaction limits, based on the network usage patterns
- - E.g. The resource is at 80+% of transaction limit for 10+% of transactions
- - Besides straightforward indication of the presence of more complex protocols on the network, we need to keep custom accounts in mind \- using the protocol that’s already ‘on the edge’ of the resource limit may be simply impossible for some custom accounts, which is why some leeway is necessary
-- Based on Core protocol advancements
- - E.g. we should be able to ‘fix’ the write limit ratios with the full launch of State Archival
+- Based on the ecosystem requests (may affect both per-transaction and
+ ledger-wide limits)
+ - Ecosystem requests should demonstrate the necessity of the increase,
+ specifically:
+ - How the protocol aligns with Stellar goals (similar to CAP alignment
+ section)
+ - Why the existing limits are not sufficient (e.g. protocol requirements
+ upper bound is clearly above the existing limit)
+- For ledger-wide limits, based on the observed ledger utilization
+ - E.g. The resource is at 90+% of capacity for 10+% of the ledgers
+ - While it’s intentional for some ledgers to be surge priced (especially in
+ case of spam/arbitrage activity), it’s important to analyze the network
+ activity and scale up in case of actual organic growth (or to at least
+ allow non-spammy traffic to be applied as well)
+- For per-transaction limits, based on the network usage patterns
+ - E.g. The resource is at 80+% of transaction limit for 10+% of transactions
+ - Besides straightforward indication of the presence of more complex
+ protocols on the network, we need to keep custom accounts in mind \-
+ using the protocol that’s already ‘on the edge’ of the resource limit may
+ be simply impossible for some custom accounts, which is why some leeway
+ is necessary
+- Based on Core protocol advancements
+ - E.g. we should be able to ‘fix’ the write limit ratios with the full launch
+ of State Archival
**Step 2 \- Come up with the desired numbers**
-Based on the signal from step 1 we need to come up with the actual limits to adjust. If the demand is based on the network usage, we need to figure out the desired increase that should satisfy the needs. Even if there is a straightforward request (e.g. raise a certain per-transaction limit by 50%), we need to consider increasing the respective ledger limit in order to maintain a high enough ratio.
+Based on the signal from step 1 we need to come up with the actual limits to
+adjust. If the demand is based on the network usage, we need to figure out the
+desired increase that should satisfy the needs. Even if there is a
+straightforward request (e.g. raise a certain per-transaction limit by 50%), we
+need to consider increasing the respective ledger limit in order to maintain a
+high enough ratio.
Here are some basic rule ideas:
-- For the ledger capacity (in case of resource causing surge pricing), we can go with as high value as technically feasible within e.g. 2x of the current limit
-- For the per transaction capacity, aim at making an increase that would make 95+% of transactions to have \<70% resource utilization
-- Maintain the ledger/transaction ratio high enough
- - 5x is a reasonable lower bound
- - Until the lower ratios from the initial launch haven’t been fixed, we should try to at least not reduce these further
+- For the ledger capacity (in case of resource causing surge pricing), we can
+ go with as high value as technically feasible within e.g. 2x of the current
+ limit
+- For the per-transaction capacity, aim at making an increase that would leave
+  95+% of transactions with \<70% resource utilization
+- Maintain the ledger/transaction ratio high enough
+ - 5x is a reasonable lower bound
+  - Until the lower ratios from the initial launch have been fixed, we should
+    try to at least not reduce these further
**Step 3 \- Evaluate the proposed numbers**
-The proposed numbers have to be thoroughly verified against the actual network capabilities. Note, that the whole limits increase process assumes that the network doesn’t operate at the full potential capacity at the moment, e.g. due to performance optimizations or just due to conservative current limits.
-
-- Most of the ledger-wide limits can be evaluated via apply-time benchmarks
- - The goal is to be able to close 95+% of the ledgers within N ms on a model validator (where N is a moving target that is based on the available processing time for closing the ledgers). Currently the tentative value of N is 500 ms
- - ‘Model validator’ is a bit imprecise, but we can’t benchmark every validator on one hand and expect them to have a proportional change in apply time on the other
- - Note, that N might need to go *down* together with the ledger-wide limits in case if we prioritize the ledger close latency over individual ledger throughput. The important implication is that this adds a potential future limit to maximum *per-transaction* limits. This just reinforces the requirements around maintaining high enough ledger/transaction ratios, though in this case it’s likely mostly relevant for the resources that impact the ledger close time the most (CPU instructions, read entries)
-- Going forward we might also consider additional evaluations based on models (such as [this](https://github.com/stellar/stellar-core/blob/master/scripts/resource-calc.ipynb)), though this will likely only become relevant when we start getting closer to the hardware limits
-- The transaction size limits (both per-ledger and per-transaction) need to be exercised in Supercluster tests to ensure there is no significant flooding TPS degradation
-- In case of changes that impact the downstream teams significantly, come up with estimated impact and reach out to the affected parties for approval (such as indexers, RPC providers, block explorers etc.)
-- In case of less tangible long term impact, evaluate on case-by-case basis
- - For example, in case of instructions we need to care about the ratio between the *sequential* per-ledger instructions and per-transaction instructions and keep it high enough (say, at least 4x)
- - We also need to care about some known limitations of the future protocols, e.g. synchronization stages in the proposed parallelization approach introduce yet another limit on maximum per-transaction instructions
- - We should be able to use benchmarking capabilities to ensure that per transaction limits are compatible with future work. As of right now, we would be looking at a “stage” taking in the order of 125ms (with a stage having enough capacity for 2-3 transactions as to ensure a sane ratio at the ledger level). Benchmarks can be run using stage limits as ledger limits and ensure that timing goals are met.
- - This doesn’t mean though that we can’t also work around these issues while designing the protocol changes
+The proposed numbers have to be thoroughly verified against the actual network
+capabilities. Note, that the whole limits increase process assumes that the
+network doesn’t operate at the full potential capacity at the moment, e.g. due
+to performance optimizations or just due to conservative current limits.
+
+- Most of the ledger-wide limits can be evaluated via apply-time benchmarks
+ - The goal is to be able to close 95+% of the ledgers within N ms on a model
+ validator (where N is a moving target that is based on the available
+ processing time for closing the ledgers). Currently the tentative value of
+ N is 500 ms
+ - ‘Model validator’ is a bit imprecise, but we can’t benchmark every
+ validator on one hand and expect them to have a proportional change in
+ apply time on the other
+ - Note, that N might need to go _down_ together with the ledger-wide limits
+    if we prioritize the ledger close latency over individual ledger
+ throughput. The important implication is that this adds a potential
+ future limit to maximum _per-transaction_ limits. This just reinforces
+ the requirements around maintaining high enough ledger/transaction
+ ratios, though in this case it’s likely mostly relevant for the resources
+ that impact the ledger close time the most (CPU instructions, read
+ entries)
+- Going forward we might also consider additional evaluations based on models
+ (such as
+ [this](https://github.com/stellar/stellar-core/blob/master/scripts/resource-calc.ipynb)),
+ though this will likely only become relevant when we start getting closer to
+ the hardware limits
+- The transaction size limits (both per-ledger and per-transaction) need to be
+ exercised in Supercluster tests to ensure there is no significant flooding
+ TPS degradation
+- In case of changes that impact the downstream teams significantly, come up
+ with estimated impact and reach out to the affected parties for approval
+ (such as indexers, RPC providers, block explorers etc.)
+- In case of less tangible long term impact, evaluate on case-by-case basis
+ - For example, in case of instructions we need to care about the ratio
+ between the _sequential_ per-ledger instructions and per-transaction
+ instructions and keep it high enough (say, at least 4x)
+ - We also need to care about some known limitations of the future protocols,
+ e.g. synchronization stages in the proposed parallelization approach
+ introduce yet another limit on maximum per-transaction instructions
+ - We should be able to use benchmarking capabilities to ensure that per
+ transaction limits are compatible with future work. As of right now, we
+ would be looking at a “stage” taking in the order of 125ms (with a stage
+ having enough capacity for 2-3 transactions as to ensure a sane ratio at
+ the ledger level). Benchmarks can be run using stage limits as ledger
+ limits and ensure that timing goals are met.
+ - This doesn’t mean though that we can’t also work around these issues
+ while designing the protocol changes
The evaluation may have several outcomes:
-- Everything is within the acceptable bounds, the proposal goes through unchanged
-- Limits can be increased only to a fraction of the desired number \- modify the proposal respectively
- - This often may still be a satisfactory outcome, given that in a lot of cases any increase should improve the situation (e.g. reduce the surge pricing rate or reduce the risk of custom account not being able to interact with a protocol)
-- The limits can’t be increased at all or can be moved just marginally \- the proposal has to be rejected and reviewed again if/when Core is better suited to handle it.
+- Everything is within the acceptable bounds, the proposal goes through
+ unchanged
+- Limits can be increased only to a fraction of the desired number \- modify
+ the proposal respectively
+ - This often may still be a satisfactory outcome, given that in a lot of
+ cases any increase should improve the situation (e.g. reduce the surge
+ pricing rate or reduce the risk of custom account not being able to
+ interact with a protocol)
+- The limits can’t be increased at all or can be moved just marginally \- the
+ proposal has to be rejected and reviewed again if/when Core is better suited
+ to handle it.
**Step 4 \- Nominate the proposal**
-In case if the proposal has gone through evaluation, it can be nominated for the vote by the validators. The nomination can be supported by the evaluation results summary.
+If the proposal has gone through evaluation, it can be nominated for the vote
+by the validators. The nomination can be supported by the evaluation results
+summary.
diff --git a/limits/slp-0001.md b/limits/slp-0001.md
index 35f62dbcc..a3ac3ac98 100644
--- a/limits/slp-0001.md
+++ b/limits/slp-0001.md
@@ -2,7 +2,7 @@
```
SLP: 0001
-Title: Increase Events and Read/Write Ledger Entry Limits for Soroban
+Title: Increase Events and Read/Write Ledger Entry Limits for Soroban
Authors: Tim Baker <@silence48>, Attiss Ngo (Interop Labs), Mootz12 (Blend Protocol), Siddharth Suresh <@sisuresh>
Consulted: Soroswap, Hoops Finance, Yieldblox, Blend Protocol, Interop Labs, Squid Router
Status: Final
@@ -10,82 +10,149 @@ Created: 2024-12-04
```
## Summary
-This proposal suggests increasing the per-transaction and ledger-wide limits for read and write ledger entries in Soroban smart contracts. The adjustment aims to support more complex protocols, enhance cross-contract interactions, and enable existing protocols to operate at their full potential on the Stellar network.
-This proposal also suggests increasing the Soroban event size limit to accommodate events containing message payloads of > 8KB for use with the Axelar Interchain Amplifier protocol.
+This proposal suggests increasing the per-transaction and ledger-wide limits
+for read and write ledger entries in Soroban smart contracts. The adjustment
+aims to support more complex protocols, enhance cross-contract interactions,
+and enable existing protocols to operate at their full potential on the Stellar
+network.
+
+This proposal also suggests increasing the Soroban event size limit to
+accommodate events containing message payloads of > 8KB for use with the Axelar
+Interchain Amplifier protocol.
## Goals Alignment
-Increasing the events and read/write ledger entry limits is a necessary step to support existing protocols and enable future innovation on Soroban. This adjustment aligns with Stellar's goals of facilitating efficient and scalable financial services, providing tangible benefits to developers and users without compromising network integrity.
+Increasing the events and read/write ledger entry limits is a necessary step to
+support existing protocols and enable future innovation on Soroban. This
+adjustment aligns with Stellar's goals of facilitating efficient and scalable
+financial services, providing tangible benefits to developers and users without
+compromising network integrity.
## Limit Change Rationale
### Ledger read/write entries per transaction
-Developers are encountering limitations with current per-transaction read/write ledger entry limits when building advanced protocols on Soroban. Specific use cases include:
-
-- **Hoops Finance**: Account contracts managing liquidity across multiple Automated Market Maker (AMM) pool contracts require calling multiple contracts resulting in many reads during auth and gathering data. Current limits force inefficient workarounds and data bloating.
-- **Soroswap**: Swap aggregator contracts performing path calculations need to read from multiple contracts simultaneously. Existing limits restrict the number of hops, limiting the effectiveness of swap routes.
-- **Yieldblox and Blend Protocol**: Lending platforms aim to support additional collateral assets face constraints due to ledger entry limits, hindering the expansion of collateral options.
-Increasing these limits will directly benefit these protocols by allowing more efficient and natural implementations without resorting to complex or inefficient solutions.
+Developers are encountering limitations with current per-transaction read/write
+ledger entry limits when building advanced protocols on Soroban. Specific use
+cases include:
+
+- **Hoops Finance**: Account contracts managing liquidity across multiple
+ Automated Market Maker (AMM) pool contracts require calling multiple
+ contracts resulting in many reads during auth and gathering data. Current
+ limits force inefficient workarounds and data bloating.
+- **Soroswap**: Swap aggregator contracts performing path calculations need to
+ read from multiple contracts simultaneously. Existing limits restrict the
+ number of hops, limiting the effectiveness of swap routes.
+- **Yieldblox and Blend Protocol**: Lending platforms aiming to support
+  additional collateral assets face constraints due to ledger entry limits,
+  hindering the expansion of collateral options.
+
+Increasing these limits will directly benefit these protocols by allowing more
+efficient and natural implementations without resorting to complex or
+inefficient solutions.
### Event size per transaction
-Squid Router is reporting occasional message payloads of 7KB for their cross-chain swaps. This indicates that a significantly complex swap could be untenable on Stellar due to the event size limit.
+Squid Router is reporting occasional message payloads of 7KB for their
+cross-chain swaps. This indicates that a significantly complex swap could be
+untenable on Stellar due to the event size limit.
-Although the Gateway itself only stores a hash of the message payload, it needs to emit an event containing the full payload for Relayers to discover and forward to the Axelar Network. Failing to make the full message payload available on-chain has the following consequences:
+Although the Gateway itself only stores a hash of the message payload, it needs
+to emit an event containing the full payload for Relayers to discover and
+forward to the Axelar Network. Failing to make the full message payload
+available on-chain has the following consequences:
-- It adds a new data availability assumption: that some Relayer has stored the message payload off-chain.
-- It negatively affects UX, as individual apps will have to compute and post the payload to be sent to some relayer API.
+- It adds a new data availability assumption: that some Relayer has stored the
+ message payload off-chain.
+- It negatively affects UX, as individual apps will have to compute and post
+ the payload to be sent to some relayer API.
## Proposed Limits
-To meet the demand, we propose the following increases:
-| Resource | Current Per-tx | Proposed Per-tx | Current Ledger | Proposed Ledger | Ratio (Ledger/Tx) |
-|----------------|----------------|-----------------|----------------|-----------------|-------------------|
-| Read Entries | 40 | 100 | 200 | 500 | 5 |
-| Write Entries | 25 | 50 | 125 | 250 | 5 |
-| Events Size | 8198 | 16384 | | | |
-By raising the per-transaction read entries from 40 to 100, contracts can perform more extensive operations within a single transaction. Ledger-wide limits are adjusted accordingly to maintain the ledger/transaction limit ratio of 5x defined by the SLP process in order to ensure fair ledger resource distribution among multiple transactions.
+To meet the demand, we propose the following increases:
+
+| Resource      | Current Per-tx | Proposed Per-tx | Current Ledger | Proposed Ledger | Ratio (Ledger/Tx) |
+| ------------- | -------------- | --------------- | -------------- | --------------- | ----------------- |
+| Read Entries  | 40             | 100             | 200            | 500             | 5                 |
+| Write Entries | 25             | 50              | 125            | 250             | 5                 |
+| Events Size   | 8198           | 16384           |                |                 |                   |
+By raising the per-transaction read entries from 40 to 100, contracts can
+perform more extensive operations within a single transaction. Ledger-wide
+limits are adjusted accordingly to maintain the ledger/transaction limit ratio
+of 5x defined by the SLP process in order to ensure fair ledger resource
+distribution among multiple transactions.
## Evaluation
### Benchmarking
-- The benchmarking was done using the apply-load command in stellar-core. The version we're using is currently in a branch as we make improvements to it (PR and branch with changes on top of that PR).
-- The benchmark populates the bucket list with 9 levels of data, applies 100 ledgers with Soroban transactions that use as much of the resources available as possible, write meta to a file, and then output performance related data. The tool outputs a lot of information, but we only show max, min and mean ledger close times in this doc.
-- The benchmarking was done on an AWS c5d.2xlarge instance using docker so we could limit the iops for a given run using the --device-write-iops DEVICE_NAME:10000 --device-read-iops DEVICE_NAME:10000 docker options. We limited the runs to 10k iops.
+- The benchmarking was done using the apply-load command in stellar-core. The
+ version we're using is currently in a branch as we make improvements to it
+ (PR and branch with changes on top of that PR).
+- The benchmark populates the bucket list with 9 levels of data, applies 100
+ ledgers with Soroban transactions that use as much of the resources available
+ as possible, write meta to a file, and then output performance related data.
+ The tool outputs a lot of information, but we only show max, min and mean
+ ledger close times in this doc.
+- The benchmarking was done on an AWS c5d.2xlarge instance using docker so we
+ could limit the iops for a given run using the --device-write-iops
+ DEVICE_NAME:10000 --device-read-iops DEVICE_NAME:10000 docker options. We
+ limited the runs to 10k iops.
#### Evaluation of current limits
-- Benchmark transactions configured with 100M instructions, 25 RO entries, 15 RO entries, 500 bytes per entry, 100 80-byte events. 5 TPL.
-1. Max ledger close: 197 milliseconds
-2. Min ledger close: 181 milliseconds
+
+- Benchmark transactions configured with 100M instructions, 25 RO entries, 15
+ RO entries, 500 bytes per entry, 100 80-byte events. 5 TPL.
+
+1. Max ledger close: 197 milliseconds
+2. Min ledger close: 181 milliseconds
3. Mean ledger close: 187 milliseconds
#### Evaluation of proposed limits
-- Benchmark increase to the entry limits. Transactions configured with 100M instructions, 50 RO entries, 50 RW entries, 240 bytes per entry, 100 80-byte events. 5 TPL.
-1. Max ledger close: 215 milliseconds
-2. Min ledger close: 199 milliseconds
-3. Mean ledger close: 205 milliseconds
-- Benchmark the increase to the event limit. Transactions configured with 100M instructions, 25 RO entries, 15 RW entries, 500 bytes per entry, 800 80-byte events. 5 TPL.
+- Benchmark increase to the entry limits. Transactions configured with 100M
+ instructions, 50 RO entries, 50 RW entries, 240 bytes per entry, 100 80-byte
+ events. 5 TPL.
+
+1. Max ledger close: 215 milliseconds
+2. Min ledger close: 199 milliseconds
+3. Mean ledger close: 205 milliseconds
+
+- Benchmark the increase to the event limit. Transactions configured with 100M
+ instructions, 25 RO entries, 15 RW entries, 500 bytes per entry, 800 80-byte
+ events. 5 TPL.
+
1. Max ledger close: 195 milliseconds
2. Min ledger close: 176 milliseconds
-3. Mean ledger close: 181 milliseconds
+3. Mean ledger close: 181 milliseconds
### Long-term protocol impact
-- The increase amount of entries read/written per transaction doesn't have any significant long term impact given the moderate absolute number in this proposal. The overall number of reads and writes only has impact at the ledger level and the per-ledger limit may be decreased in case of performance degradation in the future.
-- 2 times per-transaction event size growth in theory leads to proportional (2x) growth of the maximum event size per ledger, which will affect all the future protocol versions that might also come with organic increase of the transaction volume per ledger.
- - However, in practice not every transaction will emit 2 times more event data and thanks to the event fee there is an incentive to not emit unnecessary events.
- - Thus the expectation is that the change will mostly affect certain protocol (like bridges) and likely won't affect the overall volume of events significantly
-
+- The increased amount of entries read/written per transaction doesn't have any
+ significant long term impact given the moderate absolute number in this
+ proposal. The overall number of reads and writes only has impact at the
+ ledger level and the per-ledger limit may be decreased in case of performance
+ degradation in the future.
+- 2 times per-transaction event size growth in theory leads to proportional
+ (2x) growth of the maximum event size per ledger, which will affect all the
+ future protocol versions that might also come with organic increase of the
+ transaction volume per ledger.
+ - However, in practice not every transaction will emit 2 times more event
+ data and thanks to the event fee there is an incentive to not emit
+ unnecessary events.
+  - Thus the expectation is that the change will mostly affect certain
+    protocols (like bridges) and likely won't affect the overall volume of
+    events significantly
## Evaluation Outcome
-The overall impact of the proposed changes on the ledger close time (187 milliseconds to 205 milliseconds) is acceptable given that we increase the ledger I/O limits more than 2x. The time is still below the boundary of 250ms dedicated to Soroban transaction processing currently. There is also no significant long-term impact.
+The overall impact of the proposed changes on the ledger close time (187
+milliseconds to 205 milliseconds) is acceptable given that we increase the
+ledger I/O limits more than 2x. The time is still below the boundary of 250ms
+dedicated to Soroban transaction processing currently. There is also no
+significant long-term impact.
The change can be considered acceptable and safe for the network health.
-## References
\ No newline at end of file
+## References
diff --git a/limits/slp-0002.md b/limits/slp-0002.md
index c752d08b4..cd54f641c 100644
--- a/limits/slp-0002.md
+++ b/limits/slp-0002.md
@@ -2,7 +2,7 @@
```
SLP: 0002
-Title: Increase Ledger Wide Read Bytes Limit for Soroban
+Title: Increase Ledger Wide Read Bytes Limit for Soroban
Authors: Tomer Weller <@tomerweller>
Consulted:
Status: Final
@@ -11,59 +11,83 @@ Created: 2025-02-06
## Summary
-This proposal suggests increasing the ledger-wide read byte limit which is a bottleneck for existing protocols.
+This proposal suggests increasing the ledger-wide read byte limit which is a
+bottleneck for existing protocols.
## Goals Alignment
-Increasing the limit will support existing protocols to operate at scale, faciliating efficient and scalable everyday financial services.
+Increasing the limit will support existing protocols to operate at scale,
+facilitating efficient and scalable everyday financial services.
## Limit Change Rationale
### Ledger read bytes limit
-Soroban is often in surge pricing (20%-40% of Ledgers, Early February 2025) and the only resource getting exhausted is the read bytes.
+Soroban is often in surge pricing (20%-40% of Ledgers, Early February 2025) and
+the only resource getting exhausted is the read bytes.
-Examining a simple [SoroSwap invocation](https://stellar.expert/explorer/public/tx/238820179026497536#238820179026497537) reveals that 64kb are read, mostly due to reading Wasm (two contracts in this instance). With the current ledger limit of 500kb this amounts to a theoretical limit of 7 swaps per ledger. Other ledger wide limits (read entries, write entries, instructions, ledger write bytes) allow for more than 50 similar swaps per ledger.
+Examining a simple
+[SoroSwap invocation](https://stellar.expert/explorer/public/tx/238820179026497536#238820179026497537)
+reveals that 64kb are read, mostly due to reading Wasm (two contracts in this
+instance). With the current ledger limit of 500kb this amounts to a theoretical
+limit of 7 swaps per ledger. Other ledger wide limits (read entries, write
+entries, instructions, ledger write bytes) allow for more than 50 similar swaps
+per ledger.
-We propose increasing the ledger read bytes limit to 3.5mb which will align with the other limits in allowing for 50 such swaps per ledger.
+We propose increasing the ledger read bytes limit to 3.5mb which will align
+with the other limits in allowing for 50 such swaps per ledger.
-SoroSwap is one simple example, however increasing the limit will increase capacity for all Soroban protocols.
+SoroSwap is one simple example, however increasing the limit will increase
+capacity for all Soroban protocols.
#### Evaluation of current limits
-- Benchmark transactions configured with 100M instructions, 50 RO entries, 50 RO entries, 500 bytes per entry, 100 80-byte events. 5 TPL.
-Max ledger close: 211ms
-Min ledger close: 194ms
-Mean ledger close: 199ms
-### Benchmarking with 3.5mb Ledger Read Bytes Limit
+- Benchmark transactions configured with 100M instructions, 50 RO entries, 50
+ RO entries, 500 bytes per entry, 100 80-byte events. 5 TPL. Max ledger close:
+ 211ms Min ledger close: 194ms Mean ledger close: 199ms
-Due to the various ledger limits, we test a combination of different resources being saturated.
+### Benchmarking with 3.5mb Ledger Read Bytes Limit
+
+Due to the various ledger limits, we test a combination of different resources
+being saturated.
+
+- Transactions configured with 27.7M instructions, 6 RO entries (two of which
+ are the contract code and instance), 0 RW entries, 48,000 bytes per entry. 18
+ TPL.
-- Transactions configured with 27.7M instructions, 6 RO entries (two of which are the contract code and instance), 0 RW entries, 48,000 bytes per entry. 18 TPL.
1. Max ledger close: 132ms
2. Min ledger close: 128ms
3. Mean ledger close: 129ms
-- Transactions configured with 100M instructions, 88 RO entries, 12 RW entries, 2000 bytes per entry. 5 TPL.
+- Transactions configured with 100M instructions, 88 RO entries, 12 RW entries,
+ 2000 bytes per entry. 5 TPL.
+
1. Max ledger close: 185ms
2. Min ledger close: 176ms
3. Mean ledger close: 179ms
-- Transactions configured with 100M instructions, 75 RO entries, 25 RW entries, 1100 bytes per entry. 5 TPL.
+- Transactions configured with 100M instructions, 75 RO entries, 25 RW entries,
+ 1100 bytes per entry. 5 TPL.
+
1. Max ledger close: 195ms
2. Min ledger close: 184ms
3. Mean ledger close: 187ms
#### Evaluation of ledger read byte limits
-The close times in the benchmarks are all well below the 250ms limit we need to stay below. The read bytes do not have as much of an impact on close times as writes, as you can see above. The close times increase as well allocated more resources to writing.
+The close times in the benchmarks are all well below the 250ms limit we need to
+stay below. The read bytes do not have as much of an impact on close times as
+writes, as you can see above. The close times increase as we allocate more
+resources to writing.
### Long-term protocol impact
-This change has no long term impact because [CAP-0066](../core/cap-0066.md) will remove the in-memory read-byte limit all together.
+This change has no long term impact because [CAP-0066](../core/cap-0066.md)
+will remove the in-memory read-byte limit altogether.
## Evaluation Outcome
-The increase in the read bytes limit does not have a significant impact on close times, so the proposed limit of 3.5mb is acceptable.
+The increase in the read bytes limit does not have a significant impact on
+close times, so the proposed limit of 3.5mb is acceptable.
## References
diff --git a/limits/slp-0003.md b/limits/slp-0003.md
index a6e28fde9..98b09b56a 100644
--- a/limits/slp-0003.md
+++ b/limits/slp-0003.md
@@ -2,83 +2,99 @@
```
SLP: 0003
-Title: Increase Ledger Wide Limits for Soroban
+Title: Increase Ledger Wide Limits for Soroban
Authors: Tomer Weller <@tomerweller>
-Consulted: Dmytro Kozhevin <@dmkozh>
+Consulted: Dmytro Kozhevin <@dmkozh>
Status: Final
Created: 2025-04-23
```
## Summary
-This proposal suggests increasing certain soroban ledger limits due to high demand and low utilization of resources.
+This proposal suggests increasing certain soroban ledger limits due to high
+demand and low utilization of resources.
## Goals Alignment
-Increasing the limits will support existing protocols while allowing room for efficient and scalable everyday financial services.
+Increasing the limits will support existing protocols while allowing room for
+efficient and scalable everyday financial services.
## Limit Change Rationale
-The network is in Soroban surge pricing close to 50% of the time while validator hardware is underutilized.
+The network is in Soroban surge pricing close to 50% of the time while
+validator hardware is underutilized.
This proposal is for the following increases:
-- soroban transaction count: 100 -> 1000 (10x, this is just a sanity limit and should be set high)
+- soroban transaction count: 100 -> 1000 (10x, this is just a sanity limit and
+ should be set high)
- entries read: 500 -> 1000
- entries write: 250 -> 500
- ledger read: 3500000 -> 7000000
- ledger write: keep at 143360 (to avoid performance degradation)
-- instructions: 500000000 -> 600000000 (20% increase to avoid performance degradation)
+- instructions: 500000000 -> 600000000 (20% increase to avoid performance
+ degradation)
-### Capacity analysis
+### Capacity analysis
-Current soroban traffic is heavy on Kale's `plant()` invocations submitted through Launchtube. The following table illustrates the
-impact of the proposed changes on chain capacity for these invocations:
+Current soroban traffic is heavy on Kale's `plant()` invocations submitted
+through Launchtube. The following table illustrates the impact of the proposed
+changes on chain capacity for these invocations:
-| | plant() | current-limits | current-max-invocations | proposed-limits | proposed-max-invocations |
-|----------------|----------------|----------------|-------------------------|-----------------|--------------------------|
-| soroban-transaction-count| 2* | 100 | 50 | 1000 | 500 |
-| entries-read | 6 | 500 | 83 | 1000 | 166 |
-| entries-write | 4 | 250 | 62 | 500 | 125 |
-| ledger-write | 1,588 | 143,360 | 90 | 143,360 | 90 |
-| ledger-read | 21,412 | 3,500,000 | 163 | 7,000,000 | 326 |
-| instructions | 4,294,959 | 500,000,000 | 116 | 600,000,000 | 139 |
+| | plant() | current-limits | current-max-invocations | proposed-limits | proposed-max-invocations |
+| ------------------------- | --------- | -------------- | ----------------------- | --------------- | ------------------------ |
+| soroban-transaction-count | 2\* | 100 | 50 | 1000 | 500 |
+| entries-read | 6 | 500 | 83 | 1000 | 166 |
+| entries-write | 4 | 250 | 62 | 500 | 125 |
+| ledger-write | 1,588 | 143,360 | 90 | 143,360 | 90 |
+| ledger-read | 21,412 | 3,500,000 | 163 | 7,000,000 | 326 |
+| instructions | 4,294,959 | 500,000,000 | 116 | 600,000,000 | 139 |
-*Invocations submitted via launhctube are counted as two invocations due to fee sponsorship and current counting semantics
+\*Invocations submitted via Launchtube are counted as two invocations due to
+fee sponsorship and current counting semantics
### Evaluation of current limits
-- Benchmark transactions configured with 100M instructions, 50 RO entries, 50 RO entries, 500 bytes per entry, 100 80-byte events. 5 TPL.
-Max ledger close: 237ms
-Min ledger close: 215ms
-Mean ledger close: 221ms
+- Benchmark transactions configured with 100M instructions, 50 RO entries, 50
+ RO entries, 500 bytes per entry, 100 80-byte events. 5 TPL. Max ledger close:
+ 237ms Min ledger close: 215ms Mean ledger close: 221ms
### Benchmarking with proposed limits
-Due to the various ledger limits, we test a combination of different resources being saturated.
+Due to the various ledger limits, we test a combination of different resources
+being saturated.
+
+- Transactions configured with 100M instructions, 50 RO entries, 5 RW entries,
+ 500 bytes per entry. 6 TPL.
-- Transactions configured with 100M instructions, 50 RO entries, 5 RW entries, 500 bytes per entry. 6 TPL.
1. Max ledger close: 252ms
2. Min ledger close: 242ms
3. Mean ledger close: 245ms
-- Transactions configured with 17M instructions, 6 RO entries, 0 RW entries, 48,000 bytes per entry. 35 TPL.
+- Transactions configured with 17M instructions, 6 RO entries, 0 RW entries,
+ 48,000 bytes per entry. 35 TPL.
+
1. Max ledger close: 151ms
2. Min ledger close: 138ms
3. Mean ledger close: 142ms
-- Transactions configured with 50M instructions, 50 RO entries, 50 RW entries, 240 bytes per entry. 10 TPL.
+- Transactions configured with 50M instructions, 50 RO entries, 50 RW entries,
+ 240 bytes per entry. 10 TPL.
+
1. Max ledger close: 247ms
2. Min ledger close: 229ms
3. Mean ledger close: 237ms
### Long-term protocol impact
-- Protocol 23 is expected later this year and will drastically change resource utilization. This will require rethinking these limits.
-- If these ledger limits prove to be too taxing on validator hardware we can dial them back in a subsequent vote with minimal impact to app developers.
+- Protocol 23 is expected later this year and will drastically change resource
+ utilization. This will require rethinking these limits.
+- If these ledger limits prove to be too taxing on validator hardware we can
+ dial them back in a subsequent vote with minimal impact to app developers.
## Evaluation Outcome
-The proposed limits increase is acceptable as its impact on close times is within the desired range.
+The proposed limits increase is acceptable as its impact on close times is
+within the desired range.
## References
diff --git a/limits/slp-0004.md b/limits/slp-0004.md
index b9e45b9b4..5275045b1 100644
--- a/limits/slp-0004.md
+++ b/limits/slp-0004.md
@@ -2,8 +2,8 @@
```
SLP: 0004
-Title: Increase Ledger Wide Limits for Soroban
-Authors: Dmytro Kozhevin <@dmkozh>
+Title: Increase Ledger Wide Limits for Soroban
+Authors: Dmytro Kozhevin <@dmkozh>
Consulted: Tomer Weller <@tomerweller>, Nicolas Barry <@monsieurnicolas>, Tamir Sen <@tamirms>
Status: Final
Created: 2026-01-12
@@ -12,24 +12,26 @@ Discussion: https://github.com/orgs/stellar/discussions/1850
## Summary
-This proposal suggests increasing most of the Soroban ledger and transaction limits, and adjusts the disk reads that have been overlooked since p23 update.
+This proposal suggests increasing most of the Soroban ledger and transaction
+limits, and adjusts the disk reads that have been overlooked since p23 update.
## Goals Alignment
-Increasing the limits will support existing protocols while allowing room for efficient and scalable everyday financial services.
+Increasing the limits will support existing protocols while allowing room for
+efficient and scalable everyday financial services.
## Proposed Limits
The summary of the proposed changes is as follows:
-| Resource | Current Per-tx | Proposed Per-tx | Current Ledger | Proposed Ledger | New Ledger/Tx Ratio |
-|---------------------------|----------------|-----------------|----------------|-----------------|---------------------|
-| Disk Read Entries | 100 | 200 | 1000 | 1000 | 5 |
-| Disk Read Bytes | 200000 | 200000 | 7000000 | 400000 | 2 |
-| Write Entries | 50 | 200 | 500 | 1000 | 5 |
-| Write Bytes | 132096 | 132096 | 143360 | 286720 | 2.17 |
-| Tx Size Bytes | 132096 | 132096 | 133120 | 266240 | 2.01 |
-| Instructions/threads | 100'000'000 | 400'000'000 | 600'000'000 / 1 thread | 580'000'000 / 2 threads | 2.9 |
+| Resource | Current Per-tx | Proposed Per-tx | Current Ledger | Proposed Ledger | New Ledger/Tx Ratio |
+| -------------------- | -------------- | --------------- | ---------------------- | ----------------------- | ------------------- |
+| Disk Read Entries | 100 | 200 | 1000 | 1000 | 5 |
+| Disk Read Bytes | 200000 | 200000 | 7000000 | 400000 | 2 |
+| Write Entries | 50 | 200 | 500 | 1000 | 5 |
+| Write Bytes | 132096 | 132096 | 143360 | 286720 | 2.17 |
+| Tx Size Bytes | 132096 | 132096 | 133120 | 266240 | 2.01 |
+| Instructions/threads | 100'000'000 | 400'000'000 | 600'000'000 / 1 thread | 580'000'000 / 2 threads | 2.9 |
Additionally, footprint size increase from 100 to 200 entries is proposed.
@@ -37,107 +39,190 @@ Additionally, footprint size increase from 100 to 200 entries is proposed.
### Increase of the ledger limits
-Transaction size and write bytes ledger limits are often utilized at ~100%, and write entries are at up to 85%, and thus the increase is proposed to all of these in order to increase the network throughput.
+Transaction size and write bytes ledger limits are often utilized at ~100%, and
+write entries are at up to 85%, and thus the increase is proposed to all of
+these in order to increase the network throughput.
-Instruction utilization is usually at 50%, but with introduction of heavy-weight ZK functions in protocol 25 an increase in demand is anticipated. Since a lot of headroom has been unlocked in protocol 23 thanks to parallel transaction application, we propose increasing the number of apply threads to proactively address the potential demand and allow for faster protocol scaling. The per-thread instructions have been decreased slightly as a result of benchmarking.
+Instruction utilization is usually at 50%, but with introduction of
+heavy-weight ZK functions in protocol 25 an increase in demand is anticipated.
+Since a lot of headroom has been unlocked in protocol 23 thanks to parallel
+transaction application, we propose increasing the number of apply threads to
+proactively address the potential demand and allow for faster protocol scaling.
+The per-thread instructions have been decreased slightly as a result of
+benchmarking.
-Transaction count can be increased proportionally as well to accommodate for the increased throughput.
+Transaction count can be increased proportionally as well to accommodate for
+the increased throughput.
### Increase of the transaction limits
-With the increase of the ledger limits, it's safe to increase the corresponding transaction limits as well, as now they occupy a smaller percentage of the ledger capacity.
+With the increase of the ledger limits, it's safe to increase the corresponding
+transaction limits as well, as now they occupy a smaller percentage of the
+ledger capacity.
-Larger footprint sizes allow for better batch processing via Soroban (such as batch payments or mints).
+Larger footprint sizes allow for better batch processing via Soroban (such as
+batch payments or mints).
-The increase of the instructions limit specifically also accommodates for the potential computation-heavy protocols, such as ZK-based protocols.
+The increase of the instructions limit specifically also accommodates for the
+potential computation-heavy protocols, such as ZK-based protocols.
### Decrease of the ledger disk read bytes
-With introduction of protocol 23 read bytes limit has been removed, and only disk read bytes are counted towards the ledger limits. Disk reads are only reads of the restored entries (which are also implicitly limited by the write bytes), and reads of the 'classic' entries (which have a small and bounded size). Thus it's safe to set the ledger disk read bytes limit slightly above the ledger write bytes limit without affecting any legitimate use case.
+With the introduction of protocol 23, the read bytes limit was removed; only
+disk read bytes are counted towards the ledger limits. Disk reads are only
+reads of the restored entries (which are also implicitly limited by the write
+bytes), and reads of the 'classic' entries (which have a small and bounded
+size). Thus it's safe to set the ledger disk read bytes limit slightly above
+the ledger write bytes limit without affecting any legitimate use case.
## Evaluation
### Benchmarking
-Benchmarking has been performed on AWS c5d.2xlarge instance with Stellar Core 25.1.0 build.
+Benchmarking has been performed on AWS c5d.2xlarge instance with Stellar Core
+25.1.0 build.
### Evaluation of current limits
-SLP-3 scenarios for the current ledger limits have been re-evaluated, as the benchmark has changed since protocol 23 (e.g. the modelled/real instructions ratio has changed due to module cache).
+SLP-3 scenarios for the current ledger limits have been re-evaluated, as the
+benchmark has changed since protocol 23 (e.g. the modelled/real instructions
+ratio has changed due to module cache).
-1. Transactions configured with 100M instructions, 5 RW entries, 500 bytes per entry. 6 TPL.
+1. Transactions configured with 100M instructions, 5 RW entries, 500 bytes per
+ entry. 6 TPL.
- Ledger close min/avg/max: 277/281/287 milliseconds
-3. Transactions configured with 50M instructions, 50 RO entries, 50 RW entries, 240 bytes per entry. 10 TPL.
+2. Transactions configured with 50M instructions, 50 RO entries, 50 RW entries,
+ 240 bytes per entry. 10 TPL.
- Ledger close min/avg/max: 276/288/306 milliseconds
-RO entries are omitted, as most of the reads are in-memory and non-metered now. Scenario 2 is skipped for the same reason, as it tests RO-only entries, which don't make an interesting scenario anymore.
-
+RO entries are omitted, as most of the reads are in-memory and non-metered now.
+Scenario 2 is skipped for the same reason, as it tests RO-only entries, which
+don't make an interesting scenario anymore.
### Update to benchmarking targets
-Note, that the close time has increased since SLP-3. Given that the network performance hasn't degraded since then, we propose to relax the threshold for the Soroban apply time to 300ms. The previous threshold of 250ms was very conservative, and a relatively small increase to it shouldn't be able to compromise the network stability.
+Note that the close time has increased since SLP-3. Given that the network
+performance hasn't degraded since then, we propose to relax the threshold for
+the Soroban apply time to 300ms. The previous threshold of 250ms was very
+conservative, and a relatively small increase to it shouldn't be able to
+compromise the network stability.
### Benchmarking with proposed limits
#### Execution
-The following scenarios have been evaluated to cover the 'edge' cases of a small number of large transactions, and high number of small transactions:
+The following scenarios have been evaluated to cover the 'edge' cases of a
+small number of large transactions, and high number of small transactions:
-1. Transactions configured with 96.6M instructions, 83 RW entries, 280 bytes per entry. 12 TPL.
- - Ledger close min/avg/max: 290/298/307 milliseconds
+1. Transactions configured with 96.6M instructions, 83 RW entries, 280 bytes
+ per entry. 12 TPL.
-2. Transactions configured with 2.32M instructions, 2 RW entries, 286 bytes per entry. 493 TPL.
- - Ledger close min/avg/max: 258/273/287 milliseconds
+- Ledger close min/avg/max: 290/298/307 milliseconds
-#### Impact of tx size on overlay
+2. Transactions configured with 2.32M instructions, 2 RW entries, 286 bytes per
+ entry. 493 TPL.
- In addition to execution benchmarking, end-to-end simulation testing was performed on a 591-node network using the supercluster PubnetNetworkLimitsBench mission with a connectivity snapshot from Mainnet. To imitate current Mainnet traffic, tests always generated 200 TPS of classic load, plus varying amounts of InvokeHost load to completely saturate the transaction size ledger limit. Ledger close time remained stable at ~300ms. The following profiles were evaluated:
+- Ledger close min/avg/max: 258/273/287 milliseconds
- 1. Max-size transactions: 132,096 bytes, 2 TPL
- 2. Medium-size transactions: 40,000 bytes, 6 TPL
- 3. Small-size transactions: 2,000 bytes, 132 TPL
+#### Impact of tx size on overlay
- Overall, in all three scenarios the network remained healthy, with ledger age mostly comparable to behavior under the SLP3 transaction size limit (i.e., 7 seconds or less). Slight degradations in individual subsystems, such as nomination, were observed on some non-validating nodes, along with occasional nomination spikes on Tier 1 nodes in p99 latency. The health of the system overall, and Tier 1 in particular, remained largely unchanged.
+In addition to execution benchmarking, end-to-end simulation testing was
+performed on a 591-node network using the supercluster PubnetNetworkLimitsBench
+mission with a connectivity snapshot from Mainnet. To imitate current Mainnet
+traffic, tests always generated 200 TPS of classic load, plus varying amounts
+of InvokeHost load to completely saturate the transaction size ledger limit.
+Ledger close time remained stable at ~300ms. The following profiles were
+evaluated:
+
+1. Max-size transactions: 132,096 bytes, 2 TPL
+2. Medium-size transactions: 40,000 bytes, 6 TPL
+3. Small-size transactions: 2,000 bytes, 132 TPL
+
+Overall, in all three scenarios the network remained healthy, with ledger age
+mostly comparable to behavior under the SLP3 transaction size limit (i.e., 7
+seconds or less). Slight degradations in individual subsystems, such as
+nomination, were observed on some non-validating nodes, along with occasional
+nomination spikes on Tier 1 nodes in p99 latency. The health of the system
+overall, and Tier 1 in particular, remained largely unchanged.
### Downstream impact
-The increase in the total size of transactions, as well as increase in the total write size leads to the proportional increase in the ledger close metadata emitted by the Stellar Core.
+The increase in the total size of transactions, as well as increase in the
+total write size leads to the proportional increase in the ledger close
+metadata emitted by the Stellar Core.
-Stellar Horizon has been benchmarked as one of the known slower consumers of the metadata. Benchmark was set up with the maxed-out transaction size and ledger write dimensions, as well as traffic of 1000 additional 'classic' payment transactions to simulate the 'classic' metadata. The ingestion of the benchmark data took ~1.3s on average, which can be considered sufficient given the 5s ledger close cadence.
+Stellar Horizon has been benchmarked as one of the known slower consumers of
+the metadata. Benchmark was set up with the maxed-out transaction size and
+ledger write dimensions, as well as traffic of 1000 additional 'classic'
+payment transactions to simulate the 'classic' metadata. The ingestion of the
+benchmark data took ~1.3s on average, which can be considered sufficient given
+the 5s ledger close cadence.
-Note, that there are many metadata consumers, so this benchmark can't be considered exhaustive. However, it provides a representative data point - even for a complex ingestion system it still takes <30% of the ledger time to ingest the increased metadata.
+Note that there are many metadata consumers, so this benchmark can't be
+considered exhaustive. However, it provides a representative data point - even
+for a complex ingestion system it still takes <30% of the ledger time to ingest
+the increased metadata.
-### Transaction
+### Transaction
### Long-term protocol impact
-This SLP proposes increases to a number of per-transaction limits. Footprint-related changes are unlikely to have significant long-term impact, as it's unlikely that the respective ledger limits will need to go down significantly, and thus these should remain low enough relatively to the ledger limits in the long term.
-
-The instruction limit increase is significant and it limits the ability to reduce per-thread instruction count. Specifically, if ledgers are closed more often and with more threads, every thread still must be able to apply at least 300M instructions. However, even with a rather conservative estimate of 50% per-thread overhead (due to imperfect parallelization), we would still end up with ~300ms ledger close time, which should be acceptable even given much higher ledger close frequency. As a reference point, with the current build of Stellar Core a benchmark for 300M instructions per thread and 8 threads, 1000 transactions per ledger, and 1000 entries/286KB of writes takes 342ms on average. Note, that some amount of future optimization is assumed here (such as parallelizing some steps that are currently sequential).
+This SLP proposes increases to a number of per-transaction limits.
+Footprint-related changes are unlikely to have significant long-term impact, as
+it's unlikely that the respective ledger limits will need to go down
+significantly, and thus these should remain low enough relatively to the ledger
+limits in the long term.
+
+The instruction limit increase is significant and it limits the ability to
+reduce per-thread instruction count. Specifically, if ledgers are closed more
+often and with more threads, every thread still must be able to apply at least
+300M instructions. However, even with a rather conservative estimate of 50%
+per-thread overhead (due to imperfect parallelization), we would still end up
+with ~300ms ledger close time, which should be acceptable even given much
+higher ledger close frequency. As a reference point, with the current build of
+Stellar Core a benchmark for 300M instructions per thread and 8 threads, 1000
+transactions per ledger, and 1000 entries/286KB of writes takes 342ms on
+average. Note that some amount of future optimization is assumed here (such as
+parallelizing some steps that are currently sequential).
## Evaluation Outcome
-The proposed limits increase can be considered acceptable with the following caveats detailed above:
+The proposed limits increase can be considered acceptable with the following
+caveats detailed above:
- Average ledger close time benchmark requirement is relaxed to 300ms
-- There are certain assumptions about the long term impact of the instruction increase, which must be valid for the proposal to make sense
+- There are certain assumptions about the long term impact of the instruction
+ increase, which must be valid for the proposal to make sense
## Fees update
-In addition to increasing the resource capacity, this proposes to also lower the fees for non-refundable resources 4 times. There is currently no evidence that would suggest that the current non-refundable fees are too low, as there is no significant spammy activity on-chain that would cause real surge pricing pressure (i.e. even if some transactions are dropped, the inclusion fees normally stay at the base level of 100 stroops). Thus we propose to reduce non-refundable fees.
-
-The motivation for specifically 4 times reduction is that it's a straightforward strategy (vs per-resource fee adjustment), and the resource capacity has grown ~4x since Soroban launch for most resources. Some resources went up more or less, but that also has something to do with more or less conservative approaches to different resources on launch. The event cost reduction is just a rather conservative step - there is no limited capacity for this, so we could lower the cost while it still remains safe enough to prevent abuse.
+In addition to increasing the resource capacity, this SLP proposes to lower
+the fees for non-refundable resources 4 times. There is currently no evidence
+that would suggest that the current non-refundable fees are too low, as there
+is no significant spammy activity on-chain that would cause real surge pricing
+pressure (i.e. even if some transactions are dropped, the inclusion fees
+normally stay at the base level of 100 stroops). Thus we propose to reduce
+non-refundable fees.
+
+The motivation for specifically 4 times reduction is that it's a
+straightforward strategy (vs per-resource fee adjustment), and the resource
+capacity has grown ~4x since Soroban launch for most resources. Some resources
+went up more or less, but that also has something to do with more or less
+conservative approaches to different resources on launch. The event cost
+reduction is just a rather conservative step - there is no limited capacity for
+this, so we could lower the cost while it still remains safe enough to prevent
+abuse.
Summary of the fee updates:
-| Resource | Current fee | Discount rate | Proposed Fee |
-|------------------------|-------------|---------------|--------------|
-| Insns (fee per 10K) | 25 | 4 | 7 |
-| Disk read 1 entry | 6,250 | 4 | 1,563 |
-| Disk read 1 KB | 1,786 | 4 | 447 |
-| Write 1 entry | 10,000 | 4 | 2,500 |
-| Write 1 KB | 3,500 | 4 | 875 |
-| 1 KB tx size (bandwidth) | 1,624 | 4 | 406 |
-| 1 KB tx size (history) | 16,235 | 4 | 4,059 |
-| 1 KB events | 10,000 | 2 | 5,000 |
+| Resource | Current fee | Discount rate | Proposed Fee |
+| ------------------------ | ----------- | ------------- | ------------ |
+| Insns (fee per 10K) | 25 | 4 | 7 |
+| Disk read 1 entry | 6,250 | 4 | 1,563 |
+| Disk read 1 KB | 1,786 | 4 | 447 |
+| Write 1 entry | 10,000 | 4 | 2,500 |
+| Write 1 KB | 3,500 | 4 | 875 |
+| 1 KB tx size (bandwidth) | 1,624 | 4 | 406 |
+| 1 KB tx size (history) | 16,235 | 4 | 4,059 |
+| 1 KB events | 10,000 | 2 | 5,000 |
diff --git a/package.json b/package.json
index 1ae21b194..e96c8d46b 100644
--- a/package.json
+++ b/package.json
@@ -4,7 +4,7 @@
"sep-check": "prettier ecosystem/*.md --check"
},
"devDependencies": {
- "prettier": "2.8.8"
+ "prettier": "^3.8.1"
},
"packageManager": "yarn@1.22.19+sha1.4ba7fc5c6e704fce2066ecbfb0b0d8976fe62447"
}
diff --git a/sep-template.md b/sep-template.md
index d6d3b4435..d1f1d073b 100644
--- a/sep-template.md
+++ b/sep-template.md
@@ -12,41 +12,62 @@ Discussion: