diff --git a/Builds/VisualStudio/stellar-core.vcxproj b/Builds/VisualStudio/stellar-core.vcxproj
index 6526cc50ad..fdc74e2000 100644
--- a/Builds/VisualStudio/stellar-core.vcxproj
+++ b/Builds/VisualStudio/stellar-core.vcxproj
@@ -756,6 +756,7 @@ exit /b 0
+
@@ -1175,6 +1176,7 @@ exit /b 0
+
diff --git a/Builds/VisualStudio/stellar-core.vcxproj.filters b/Builds/VisualStudio/stellar-core.vcxproj.filters
index 812079ae8a..ec9faa1813 100644
--- a/Builds/VisualStudio/stellar-core.vcxproj.filters
+++ b/Builds/VisualStudio/stellar-core.vcxproj.filters
@@ -1432,6 +1432,9 @@
util
+
+ util
+
@@ -2542,6 +2545,9 @@
util
+
+ util
+
diff --git a/docs/apply-load-limits-for-model-tx.cfg b/docs/apply-load-limits-for-model-tx.cfg
new file mode 100644
index 0000000000..2dfe9f6259
--- /dev/null
+++ b/docs/apply-load-limits-for-model-tx.cfg
@@ -0,0 +1,114 @@
+# This is the Stellar Core configuration example for using the load generation
+# (apply-load) tool for finding the maximum ledger limits by applying a number
+# of the equivalent 'model' transactions.
+#
+# The mode will find the maximum value of N, such that closing a ledger
+# with N 'model' transactions takes less than a certain target time. Then
+# it will find the actual ledger limits by multiplying the 'model' transaction
+# dimensions by N.
+#
+# This is not meant to be used in any production contexts.
+#
+# The core with this configuration should be run using `./stellar-core apply-load --mode limits-for-model-tx`
+
+# Enable load generation
+ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=true
+
+# Diagnostic events should generally be disabled, but can be enabled for debug
+ENABLE_SOROBAN_DIAGNOSTIC_EVENTS = false
+
+# Target average ledger close time.
+APPLY_LOAD_TARGET_CLOSE_TIME_MS = 600
+
+# Network configuration section
+
+# Most of the network configuration will be inferred automatically from the 'model'
+# transaction (for transaction limits) and from the search itself (for the ledger)
+# limits. Only the following limits need to be set:
+
+# Maximum number of Soroban transactions to apply. This is the upper bound for the
+# search.
+APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 2000
+
+# Number of the transaction clusters and thus apply threads. This will stay constant
+# during the search, unlike all the other ledger limits.
+APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 8
+
+# The following section contains various parameters for the generated load.
+
+# Number of ledgers to close for benchmarking each iteration of search.
+# The average close time will then be compared to APPLY_LOAD_TARGET_CLOSE_TIME_MS.
+APPLY_LOAD_NUM_LEDGERS = 10
+
+# Generate that many simple Classic payment transactions in every benchmark ledger.
+# Note that this will affect the close time.
+APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 0
+
+# Size of every synthetic data entry generated.
+# This setting affects both the size of the pre-generated Bucket List entries,
+# and the size of every entry that a Soroban transaction reads/writes.
+APPLY_LOAD_DATA_ENTRY_SIZE = 250
+
+# Bucket list pre-generation
+
+# The benchmark will pre-generate ledger entries using the simplified ledger
+# close process; the generated ledgers won't be reflected in the meta or
+# history checkpoints.
+
+# Faster settings, more shallow BL (up to level 6)
+# Number of ledgers to close
+APPLY_LOAD_BL_SIMULATED_LEDGERS = 10000
+# Write a batch of entries every that many ledgers
+APPLY_LOAD_BL_WRITE_FREQUENCY = 1000
+# Write that many entries in every batch
+APPLY_LOAD_BL_BATCH_SIZE = 1000
+# Write that many entries in every 'last' ledger
+APPLY_LOAD_BL_LAST_BATCH_SIZE = 100
+# Write entry batches in every ledger of this many last ledgers
+APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300
+
+# Slower settings, deeper BL (up to level 9)
+#APPLY_LOAD_BL_SIMULATED_LEDGERS = 300000
+#APPLY_LOAD_BL_WRITE_FREQUENCY = 10000
+#APPLY_LOAD_BL_BATCH_SIZE = 10000
+#APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300
+#APPLY_LOAD_BL_LAST_BATCH_SIZE = 100
+
+# Settings for the generated 'model' transaction.
+# Unlike the 'limit-based' apply-load mode, only a single value
+# with `[1]` as distribution is allowed, thus only a single kind
+# of transaction will be generated.
+
+# Number of *disk* reads a transaction performs. Every disk read is restoration,
+# so it's also a write (accounted for in NUM_RW_ENTRIES).
+APPLY_LOAD_NUM_DISK_READ_ENTRIES = [1]
+APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = [1]
+
+# Number of writes a transaction performs.
+APPLY_LOAD_NUM_RW_ENTRIES = [5]
+APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = [1]
+
+# Number of 80-byte events a transaction emits.
+APPLY_LOAD_EVENT_COUNT = [15]
+APPLY_LOAD_EVENT_COUNT_DISTRIBUTION = [1]
+
+# Size of a generated transaction.
+APPLY_LOAD_TX_SIZE_BYTES = [1650]
+APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION = [1]
+
+# Number of instructions a transaction will use.
+APPLY_LOAD_INSTRUCTIONS = [4250000]
+APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION = [1]
+
+
+# Minimal core config boilerplate
+
+RUN_STANDALONE=true
+NODE_IS_VALIDATOR=true
+UNSAFE_QUORUM=true
+NETWORK_PASSPHRASE="Standalone Network ; February 2017"
+NODE_SEED="SDQVDISRYN2JXBS7ICL7QJAEKB3HWBJFP2QECXG7GZICAHBK4UNJCWK2 self"
+
+[QUORUM_SET]
+THRESHOLD_PERCENT=100
+VALIDATORS=["$self"]
diff --git a/docs/software/commands.md b/docs/software/commands.md
index 70b535b27c..37a61ce114 100644
--- a/docs/software/commands.md
+++ b/docs/software/commands.md
@@ -29,13 +29,18 @@ Command options can only by placed after command.
synthetic ledger close metadata emitted during the benchmark, and then use
it for benchmarking the meta consumers.
* This can only be used when `ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=true`
+ * The command supports several modes:
+ - **--mode limit-based**: the default mode that measures the
+ ledger close time for applying transactions.
+ - **--mode max-sac-tps**: determines maximum TPS for the load consisting
+      only of fast SAC transfers.
+ - **--mode limits-for-model-tx**: determines maximum ledger limits for the
+ load consisting only of a customizable 'model' transaction.
* Load generation is configured in the Core config file. The relevant settings
all begin with `APPLY_LOAD_`. See full example configurations with
per-setting documentation in the `docs` directory
- (`apply-load.cfg`, `apply-load-for-meta.cfg`).
- * The command also supports the special mode for determining max apply 'TPS'
- using SAC transfers. It can be invoked by passing `max-sac-tps` as
- `apply-load` argument.
+ (all the `apply-load-*.cfg` files demonstrate different modes and use
+ cases).
* **calculate-asset-supply**: Calculates total supply of an asset from the live and hot archive bucket lists IF the total supply fits in a 64 bit signed integer. Also validates against totalCoins for the native asset. Uses `--code ` and `--issuer ` to specify the asset. Uses the native asset if neither `--code` nor `--issuer` is given.
* **catchup **: Perform catchup from history
diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp
index 2d11e99a7f..b562e71127 100644
--- a/src/main/CommandLine.cpp
+++ b/src/main/CommandLine.cpp
@@ -1903,6 +1903,11 @@ applyLoadModeParser(std::string& modeArg, ApplyLoadMode& mode)
mode = ApplyLoadMode::MAX_SAC_TPS;
return "";
}
+ if (iequals(modeArg, "limits-for-model-tx"))
+ {
+ mode = ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX;
+ return "";
+ }
return "Unrecognized apply-load mode. Please select 'ledger-limits' "
"or 'max-sac-tps'.";
};
@@ -1931,7 +1936,11 @@ runApplyLoad(CommandLineArgs const& args)
config.TESTING_UPGRADE_MAX_TX_SET_SIZE = 1000;
config.LEDGER_PROTOCOL_VERSION =
Config::CURRENT_LEDGER_PROTOCOL_VERSION;
-
+ if (config.APPLY_LOAD_NUM_LEDGERS == 0)
+ {
+ throw std::runtime_error(
+ "APPLY_LOAD_NUM_LEDGERS must be greater than 0");
+ }
if (mode == ApplyLoadMode::MAX_SAC_TPS)
{
if (config.APPLY_LOAD_MAX_SAC_TPS_MIN_TPS >=
@@ -2024,109 +2033,7 @@ runApplyLoad(CommandLineArgs const& args)
{"ledger", "transaction", "total-apply"});
totalTxApplyTime.Clear();
- if (mode == ApplyLoadMode::MAX_SAC_TPS)
- {
- al.findMaxSacTps();
- return 0;
- }
-
- if (config.APPLY_LOAD_NUM_LEDGERS == 0)
- {
- throw std::runtime_error(
- "APPLY_LOAD_NUM_LEDGERS must be greater than 0");
- }
-
- for (size_t i = 0; i < config.APPLY_LOAD_NUM_LEDGERS; ++i)
- {
- app.getBucketManager()
- .getLiveBucketList()
- .resolveAllFutures();
- releaseAssert(app.getBucketManager()
- .getLiveBucketList()
- .futuresAllResolved());
- al.benchmark();
- }
-
- CLOG_INFO(Perf, "Max ledger close: {} milliseconds",
- ledgerClose.max());
- CLOG_INFO(Perf, "Min ledger close: {} milliseconds",
- ledgerClose.min());
- CLOG_INFO(Perf, "Mean ledger close: {} milliseconds",
- ledgerClose.mean());
- CLOG_INFO(Perf, "stddev ledger close: {} milliseconds",
- ledgerClose.std_dev());
-
- CLOG_INFO(Perf, "Max CPU ins ratio: {}",
- cpuInsRatio.max() / 1000000);
- CLOG_INFO(Perf, "Mean CPU ins ratio: {}",
- cpuInsRatio.mean() / 1000000);
-
- CLOG_INFO(Perf, "Max CPU ins ratio excl VM: {}",
- cpuInsRatioExclVm.max() / 1000000);
- CLOG_INFO(Perf, "Mean CPU ins ratio excl VM: {}",
- cpuInsRatioExclVm.mean() / 1000000);
- CLOG_INFO(Perf, "stddev CPU ins ratio excl VM: {}",
- cpuInsRatioExclVm.std_dev() / 1000000);
-
- CLOG_INFO(Perf, "Ledger Max CPU ins ratio: {}",
- ledgerCpuInsRatio.max() / 1000000);
- CLOG_INFO(Perf, "Ledger Mean CPU ins ratio: {}",
- ledgerCpuInsRatio.mean() / 1000000);
- CLOG_INFO(Perf, "Ledger stddev CPU ins ratio: {}",
- ledgerCpuInsRatio.std_dev() / 1000000);
-
- CLOG_INFO(Perf, "Ledger Max CPU ins ratio excl VM: {}",
- ledgerCpuInsRatioExclVm.max() / 1000000);
- CLOG_INFO(Perf, "Ledger Mean CPU ins ratio excl VM: {}",
- ledgerCpuInsRatioExclVm.mean() / 1000000);
- CLOG_INFO(
- Perf,
- "Ledger stddev CPU ins ratio excl VM: {} milliseconds",
- ledgerCpuInsRatioExclVm.std_dev() / 1000000);
- // Utilization metrics are relevant only in limit-based
- // mode.
- if (mode == ApplyLoadMode::LIMIT_BASED)
- {
- CLOG_INFO(Perf,
- "Tx count utilization min/avg/max {}/{}/{}%",
- al.getTxCountUtilization().min() / 1000.0,
- al.getTxCountUtilization().mean() / 1000.0,
- al.getTxCountUtilization().max() / 1000.0);
- CLOG_INFO(Perf,
- "Instruction utilization min/avg/max {}/{}/{}%",
- al.getInstructionUtilization().min() / 1000.0,
- al.getInstructionUtilization().mean() / 1000.0,
- al.getInstructionUtilization().max() / 1000.0);
- CLOG_INFO(Perf, "Tx size utilization min/avg/max {}/{}/{}%",
- al.getTxSizeUtilization().min() / 1000.0,
- al.getTxSizeUtilization().mean() / 1000.0,
- al.getTxSizeUtilization().max() / 1000.0);
- CLOG_INFO(
- Perf,
- "Disk read bytes utilization min/avg/max {}/{}/{}%",
- al.getDiskReadByteUtilization().min() / 1000.0,
- al.getDiskReadByteUtilization().mean() / 1000.0,
- al.getDiskReadByteUtilization().max() / 1000.0);
- CLOG_INFO(Perf,
- "Write bytes utilization min/avg/max {}/{}/{}%",
- al.getDiskWriteByteUtilization().min() / 1000.0,
- al.getDiskWriteByteUtilization().mean() / 1000.0,
- al.getDiskWriteByteUtilization().max() / 1000.0);
- CLOG_INFO(
- Perf,
- "Disk read entry utilization min/avg/max {}/{}/{}%",
- al.getDiskReadEntryUtilization().min() / 1000.0,
- al.getDiskReadEntryUtilization().mean() / 1000.0,
- al.getDiskReadEntryUtilization().max() / 1000.0);
- CLOG_INFO(Perf,
- "Write entry utilization min/avg/max {}/{}/{}%",
- al.getWriteEntryUtilization().min() / 1000.0,
- al.getWriteEntryUtilization().mean() / 1000.0,
- al.getWriteEntryUtilization().max() / 1000.0);
- }
-
- CLOG_INFO(Perf, "Tx Success Rate: {:f}%",
- al.successRate() * 100);
+ al.execute();
}
return 0;
diff --git a/src/main/Config.cpp b/src/main/Config.cpp
index 962794d13b..0ab629f58a 100644
--- a/src/main/Config.cpp
+++ b/src/main/Config.cpp
@@ -1742,19 +1742,18 @@ Config::processConfig(std::shared_ptr t)
}},
{"APPLY_LOAD_NUM_LEDGERS",
[&]() { APPLY_LOAD_NUM_LEDGERS = readInt(item); }},
- {"APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS",
+ {"APPLY_LOAD_TARGET_CLOSE_TIME_MS",
[&]() {
- APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS =
+ APPLY_LOAD_TARGET_CLOSE_TIME_MS =
readInt(item, 1);
- if (APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS %
- ApplyLoad::MAX_SAC_TPS_TIME_STEP_MS !=
+ if (APPLY_LOAD_TARGET_CLOSE_TIME_MS %
+ ApplyLoad::TARGET_CLOSE_TIME_STEP_MS !=
0)
{
- throw std::invalid_argument(
- fmt::format(FMT_STRING("APPLY_LOAD_MAX_SAC_TPS_"
- "TARGET_CLOSE_TIME_MS must "
- "be a multiple of {}."),
- ApplyLoad::MAX_SAC_TPS_TIME_STEP_MS));
+ throw std::invalid_argument(fmt::format(
+ FMT_STRING("APPLY_LOAD_TARGET_CLOSE_TIME_MS "
+ "must be a multiple of {}."),
+ ApplyLoad::TARGET_CLOSE_TIME_STEP_MS));
}
}},
{"APPLY_LOAD_MAX_SAC_TPS_MIN_TPS",
diff --git a/src/main/Config.h b/src/main/Config.h
index c6de59c0a1..e2b72b20a0 100644
--- a/src/main/Config.h
+++ b/src/main/Config.h
@@ -386,8 +386,16 @@ class Config : public std::enable_shared_from_this
uint32_t APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 1;
+ // Number of ledgers to apply in apply-load.
+ // Depending on the mode this represents either the total number of ledgers
+ // to close for benchmarking, or the number of ledgers to apply per
+ // iteration of binary search for modes that perform search.
uint32_t APPLY_LOAD_NUM_LEDGERS = 100;
+ // Target ledger close time in milliseconds for modes that perform binary
+ // search of TPS or limits.
+ uint32_t APPLY_LOAD_TARGET_CLOSE_TIME_MS = 1000;
+
// Number of classic transactions to include in each ledger in ledger limit
// based apply-load mode.
uint32_t APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 0;
@@ -412,7 +420,6 @@ class Config : public std::enable_shared_from_this
std::vector APPLY_LOAD_EVENT_COUNT_DISTRIBUTION;
// MAX_SAC_TPS mode specific parameters
- uint32_t APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS = 1000;
uint32_t APPLY_LOAD_MAX_SAC_TPS_MIN_TPS = 100;
uint32_t APPLY_LOAD_MAX_SAC_TPS_MAX_TPS = 50000;
diff --git a/src/simulation/ApplyLoad.cpp b/src/simulation/ApplyLoad.cpp
index a2b979bf17..bf87de5ce4 100644
--- a/src/simulation/ApplyLoad.cpp
+++ b/src/simulation/ApplyLoad.cpp
@@ -25,6 +25,7 @@
#include "util/GlobalChecks.h"
#include "util/Logging.h"
#include "util/XDRCereal.h"
+#include "xdrpp/printer.h"
#include
namespace stellar
@@ -32,7 +33,7 @@ namespace stellar
namespace
{
SorobanUpgradeConfig
-getUpgradeConfig(Config const& cfg)
+getUpgradeConfig(Config const& cfg, bool validate = true)
{
SorobanUpgradeConfig upgradeConfig;
upgradeConfig.maxContractSizeBytes = 65536;
@@ -80,20 +81,23 @@ getUpgradeConfig(Config const& cfg)
// These values are set above using values from Config, so the assertions
// will fail if the config file is missing any of these values.
- releaseAssert(*upgradeConfig.ledgerMaxInstructions > 0);
- releaseAssert(*upgradeConfig.txMaxInstructions > 0);
- releaseAssert(*upgradeConfig.ledgerMaxDiskReadEntries > 0);
- releaseAssert(*upgradeConfig.ledgerMaxDiskReadBytes > 0);
- releaseAssert(*upgradeConfig.ledgerMaxWriteLedgerEntries > 0);
- releaseAssert(*upgradeConfig.ledgerMaxWriteBytes > 0);
- releaseAssert(*upgradeConfig.ledgerMaxTxCount > 0);
- releaseAssert(*upgradeConfig.txMaxDiskReadEntries > 0);
- releaseAssert(*upgradeConfig.txMaxDiskReadBytes > 0);
- releaseAssert(*upgradeConfig.txMaxWriteLedgerEntries > 0);
- releaseAssert(*upgradeConfig.txMaxWriteBytes > 0);
- releaseAssert(*upgradeConfig.txMaxContractEventsSizeBytes > 0);
- releaseAssert(*upgradeConfig.ledgerMaxTransactionsSizeBytes > 0);
- releaseAssert(*upgradeConfig.txMaxSizeBytes > 0);
+ if (validate)
+ {
+ releaseAssert(*upgradeConfig.ledgerMaxInstructions > 0);
+ releaseAssert(*upgradeConfig.ledgerMaxDiskReadEntries > 0);
+ releaseAssert(*upgradeConfig.ledgerMaxDiskReadBytes > 0);
+ releaseAssert(*upgradeConfig.ledgerMaxWriteLedgerEntries > 0);
+ releaseAssert(*upgradeConfig.ledgerMaxWriteBytes > 0);
+ releaseAssert(*upgradeConfig.ledgerMaxTransactionsSizeBytes > 0);
+ releaseAssert(*upgradeConfig.ledgerMaxTxCount > 0);
+ releaseAssert(*upgradeConfig.txMaxInstructions > 0);
+ releaseAssert(*upgradeConfig.txMaxDiskReadEntries > 0);
+ releaseAssert(*upgradeConfig.txMaxDiskReadBytes > 0);
+ releaseAssert(*upgradeConfig.txMaxWriteLedgerEntries > 0);
+ releaseAssert(*upgradeConfig.txMaxWriteBytes > 0);
+ releaseAssert(*upgradeConfig.txMaxContractEventsSizeBytes > 0);
+ releaseAssert(*upgradeConfig.txMaxSizeBytes > 0);
+ }
return upgradeConfig;
}
@@ -220,7 +224,8 @@ ApplyLoad::getKeyForArchivedEntry(uint64_t index)
}
uint32_t
-ApplyLoad::calculateRequiredHotArchiveEntries(Config const& cfg)
+ApplyLoad::calculateRequiredHotArchiveEntries(ApplyLoadMode mode,
+ Config const& cfg)
{
// If no RO entries are configured, return 0
if (cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES.empty())
@@ -254,15 +259,25 @@ ApplyLoad::calculateRequiredHotArchiveEntries(Config const& cfg)
// to scale the expected number of restores by the transaction queue size.
totalExpectedRestores *= cfg.SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER;
+ // In FIND_LIMITS_FOR_MODEL_TX mode, we perform a binary search that uses
+ // new restores and thus we need to additionally scale the restores by
+ // log2 of max tx count (which approximates the maximum number of binary
+ // search iterations).
+ if (mode == ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX)
+ {
+ totalExpectedRestores *= log2(cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT);
+ }
+
// Add some generous buffer since actual distributions may vary.
return totalExpectedRestores * 1.5;
}
ApplyLoad::ApplyLoad(Application& app, ApplyLoadMode mode)
: mApp(app)
+ , mMode(mode)
, mRoot(app.getRoot())
, mTotalHotArchiveEntries(
- calculateRequiredHotArchiveEntries(app.getConfig()))
+ calculateRequiredHotArchiveEntries(mode, app.getConfig()))
, mTxCountUtilization(
mApp.getMetrics().NewHistogram({"soroban", "apply-load", "tx-count"}))
, mInstructionUtilization(mApp.getMetrics().NewHistogram(
@@ -277,13 +292,14 @@ ApplyLoad::ApplyLoad(Application& app, ApplyLoadMode mode)
{"soroban", "apply-load", "disk-read-entry"}))
, mWriteEntryUtilization(mApp.getMetrics().NewHistogram(
{"soroban", "apply-load", "write-entry"}))
- , mMode(mode)
, mTxGenerator(app, mTotalHotArchiveEntries)
{
auto const& config = mApp.getConfig();
+
switch (mMode)
{
case ApplyLoadMode::LIMIT_BASED:
+ case ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX:
mNumAccounts = config.APPLY_LOAD_MAX_SOROBAN_TX_COUNT *
config.SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER +
config.APPLY_LOAD_CLASSIC_TXS_PER_LEDGER *
@@ -293,10 +309,14 @@ ApplyLoad::ApplyLoad(Application& app, ApplyLoadMode mode)
case ApplyLoadMode::MAX_SAC_TPS:
mNumAccounts = config.APPLY_LOAD_MAX_SAC_TPS_MAX_TPS *
config.SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER *
- config.APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS /
- 1000.0;
+ config.APPLY_LOAD_TARGET_CLOSE_TIME_MS / 1000.0;
break;
}
+ if (config.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS == 0)
+ {
+ throw std::runtime_error(
+ "APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS cannot be zero");
+ }
setup();
}
@@ -325,15 +345,17 @@ ApplyLoad::setup()
setupUpgradeContract();
- if (mMode == ApplyLoadMode::MAX_SAC_TPS)
+ switch (mMode)
{
+ case ApplyLoadMode::MAX_SAC_TPS:
+ case ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX:
// Just upgrade to a placeholder number of TXs, we'll
// upgrade again before each TPS run.
upgradeSettingsForMaxTPS(100000);
- }
- else
- {
+ break;
+ case ApplyLoadMode::LIMIT_BASED:
upgradeSettings();
+ break;
}
setupLoadContract();
@@ -343,7 +365,8 @@ ApplyLoad::setup()
{
setupBatchTransferContracts();
}
- if (mMode == ApplyLoadMode::LIMIT_BASED)
+ if (mMode == ApplyLoadMode::LIMIT_BASED ||
+ mMode == ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX)
{
setupBucketList();
}
@@ -358,7 +381,6 @@ ApplyLoad::closeLedger(std::vector const& txs,
if (recordSorobanUtilization)
{
- releaseAssert(mMode == ApplyLoadMode::LIMIT_BASED);
auto ledgerResources = mApp.getLedgerManager().maxLedgerResources(true);
auto txSetResources =
txSet.second->getPhases()
@@ -400,6 +422,23 @@ ApplyLoad::closeLedger(std::vector const& txs,
stellar::txtest::closeLedger(mApp, txs, /* strictOrder */ false, upgrades);
}
+void
+ApplyLoad::execute()
+{
+ switch (mMode)
+ {
+ case ApplyLoadMode::LIMIT_BASED:
+ benchmarkLimits();
+ break;
+ case ApplyLoadMode::MAX_SAC_TPS:
+ findMaxSacTps();
+ break;
+ case ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX:
+ findMaxLimitsForModelTransaction();
+ break;
+ }
+}
+
void
ApplyLoad::setupAccounts()
{
@@ -477,9 +516,27 @@ ApplyLoad::applyConfigUpgrade(SorobanUpgradeConfig const& upgradeConfig)
resources.diskReadBytes = 0;
resources.writeBytes = 3'100;
- auto invokeTx = mTxGenerator.invokeSorobanCreateUpgradeTransaction(
- lm.getLastClosedLedgerNum() + 1, 0, upgradeBytes, mUpgradeCodeKey,
- mUpgradeInstanceKey, std::nullopt, resources);
+ auto [_, invokeTx] = mTxGenerator.invokeSorobanCreateUpgradeTransaction(
+ lm.getLastClosedLedgerNum() + 1, TxGenerator::ROOT_ACCOUNT_ID,
+ upgradeBytes, mUpgradeCodeKey, mUpgradeInstanceKey, std::nullopt,
+ resources);
+ {
+ LedgerSnapshot ls(mApp);
+ auto diagnostics =
+ DiagnosticEventManager::createForValidation(mApp.getConfig());
+ auto validationRes = invokeTx->checkValid(mApp.getAppConnector(), ls, 0,
+ 0, 0, diagnostics);
+ if (!validationRes->isSuccess())
+ {
+ if (validationRes->getResultCode() == txSOROBAN_INVALID)
+ {
+ diagnostics.debugLogEvents();
+ }
+ CLOG_FATAL(Perf, "Created invalid upgrade settings transaction: {}",
+ validationRes->getResultCode());
+ releaseAssert(validationRes->isSuccess());
+ }
+ }
auto upgradeSetKey = mTxGenerator.getConfigUpgradeSetKey(
upgradeConfig,
@@ -491,13 +548,130 @@ ApplyLoad::applyConfigUpgrade(SorobanUpgradeConfig const& upgradeConfig)
auto v = xdr::xdr_to_opaque(ledgerUpgrade);
upgrade.push_back(UpgradeType{v.begin(), v.end()});
- closeLedger({invokeTx.second}, upgrade);
+ closeLedger({invokeTx}, upgrade);
releaseAssert(mTxGenerator.getApplySorobanSuccess().count() -
currApplySorobanSuccess ==
1);
}
+std::pair
+ApplyLoad::updateSettingsForTxCount(uint64_t txsPerLedger)
+{
+ // Round the configuration values down to be a multiple of the respective
+    // step in order to get more readable configurations, and also to speed
+ // up the binary search significantly.
+ uint64_t const INSTRUCTIONS_ROUNDING_STEP = 5'000'000;
+ uint64_t const SIZE_ROUNDING_STEP = 500;
+ uint64_t const ENTRIES_ROUNDING_STEP = 10;
+
+ auto const& config = mApp.getConfig();
+ uint64_t insns =
+ roundDown(txsPerLedger * config.APPLY_LOAD_INSTRUCTIONS[0] /
+ config.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS,
+ INSTRUCTIONS_ROUNDING_STEP);
+ uint64_t txSize = roundDown(
+ txsPerLedger * config.APPLY_LOAD_TX_SIZE_BYTES[0], SIZE_ROUNDING_STEP);
+
+ uint64_t writeEntries =
+ roundDown(txsPerLedger * config.APPLY_LOAD_NUM_RW_ENTRIES[0],
+ ENTRIES_ROUNDING_STEP);
+ uint64_t writeBytes = roundDown(
+ writeEntries * config.APPLY_LOAD_DATA_ENTRY_SIZE, SIZE_ROUNDING_STEP);
+
+ uint64_t diskReadEntries =
+ roundDown(txsPerLedger * config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0],
+ ENTRIES_ROUNDING_STEP);
+ uint64_t diskReadBytes =
+ roundDown(diskReadEntries * config.APPLY_LOAD_DATA_ENTRY_SIZE,
+ SIZE_ROUNDING_STEP);
+
+ if (diskReadEntries == 0)
+ {
+ diskReadEntries =
+ MinimumSorobanNetworkConfig::TX_MAX_READ_LEDGER_ENTRIES;
+ diskReadBytes = MinimumSorobanNetworkConfig::TX_MAX_READ_BYTES;
+ }
+
+ uint64_t actualMaxTxs = txsPerLedger;
+ actualMaxTxs =
+ std::min(actualMaxTxs,
+ insns * config.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS /
+ config.APPLY_LOAD_INSTRUCTIONS[0]);
+ actualMaxTxs =
+ std::min(actualMaxTxs, txSize / config.APPLY_LOAD_TX_SIZE_BYTES[0]);
+ if (config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0] > 0)
+ {
+ actualMaxTxs = std::min(actualMaxTxs,
+ diskReadEntries /
+ config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0]);
+ actualMaxTxs = std::min(
+ actualMaxTxs,
+ diskReadBytes / (config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0] *
+ config.APPLY_LOAD_DATA_ENTRY_SIZE));
+ }
+ actualMaxTxs = std::min(actualMaxTxs,
+ writeEntries / config.APPLY_LOAD_NUM_RW_ENTRIES[0]);
+
+ actualMaxTxs = std::min(actualMaxTxs,
+ writeBytes / (config.APPLY_LOAD_NUM_RW_ENTRIES[0] *
+ config.APPLY_LOAD_DATA_ENTRY_SIZE));
+ CLOG_INFO(Perf,
+ "Resources after rounding for testing {} actual max txs per "
+ "ledger: "
+ "instructions {}, tx size {}, disk read entries {}, "
+ "disk read bytes {}, rw entries {}, rw bytes {}",
+ actualMaxTxs, insns, txSize, diskReadEntries, diskReadBytes,
+ writeEntries, writeBytes);
+
+ auto upgradeConfig = getUpgradeConfig(mApp.getConfig(),
+ /* validate */ false);
+ // Set tx limits to the respective resources of the 'model'
+ // transaction.
+ upgradeConfig.txMaxInstructions =
+ std::max(MinimumSorobanNetworkConfig::TX_MAX_INSTRUCTIONS,
+ config.APPLY_LOAD_INSTRUCTIONS[0]);
+ upgradeConfig.txMaxSizeBytes =
+ std::max(MinimumSorobanNetworkConfig::TX_MAX_SIZE_BYTES,
+ config.APPLY_LOAD_TX_SIZE_BYTES[0]);
+ upgradeConfig.txMaxDiskReadEntries =
+ std::max(MinimumSorobanNetworkConfig::TX_MAX_READ_LEDGER_ENTRIES,
+ config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0]);
+ upgradeConfig.txMaxWriteLedgerEntries =
+ std::max(MinimumSorobanNetworkConfig::TX_MAX_WRITE_LEDGER_ENTRIES,
+ config.APPLY_LOAD_NUM_RW_ENTRIES[0]);
+ upgradeConfig.txMaxDiskReadBytes =
+ std::max(MinimumSorobanNetworkConfig::TX_MAX_READ_BYTES,
+ config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0] *
+ config.APPLY_LOAD_DATA_ENTRY_SIZE);
+ upgradeConfig.txMaxWriteBytes =
+ std::max(MinimumSorobanNetworkConfig::TX_MAX_WRITE_BYTES,
+ config.APPLY_LOAD_NUM_RW_ENTRIES[0] *
+ config.APPLY_LOAD_DATA_ENTRY_SIZE);
+ upgradeConfig.txMaxContractEventsSizeBytes =
+ std::max(MinimumSorobanNetworkConfig::TX_MAX_CONTRACT_EVENTS_SIZE_BYTES,
+ config.APPLY_LOAD_EVENT_COUNT[0] *
+ TxGenerator::SOROBAN_LOAD_V2_EVENT_SIZE_BYTES +
+ 100);
+ upgradeConfig.txMaxFootprintEntries =
+ *upgradeConfig.txMaxDiskReadEntries +
+ *upgradeConfig.txMaxWriteLedgerEntries;
+
+    // Set the ledger-wide limits to the values computed above.
+    // Note that in theory we could end up with ledger limits lower than
+    // the transaction limits, but that would normally just be
+ // mis-configuration (using a model transaction that is too large to
+ // be applied within the target close time).
+ upgradeConfig.ledgerMaxInstructions = insns;
+ upgradeConfig.ledgerMaxTransactionsSizeBytes = txSize;
+ upgradeConfig.ledgerMaxDiskReadEntries = diskReadEntries;
+ upgradeConfig.ledgerMaxWriteLedgerEntries = writeEntries;
+ upgradeConfig.ledgerMaxDiskReadBytes = diskReadBytes;
+ upgradeConfig.ledgerMaxWriteBytes = writeBytes;
+
+ return std::make_pair(upgradeConfig, actualMaxTxs);
+}
+
void
ApplyLoad::upgradeSettings()
{
@@ -727,9 +901,10 @@ ApplyLoad::setupBucketList()
// remaining entries over APPLY_LOAD_BL_LAST_BATCH_LEDGERS ledgers.
uint32_t hotArchiveLastBatchSize =
mTotalHotArchiveEntries > 0
- ? (mTotalHotArchiveEntries -
- (hotArchiveBatchSize * hotArchiveBatchCount)) /
- cfg.APPLY_LOAD_BL_LAST_BATCH_LEDGERS
+ ? ceil(static_cast(
+ mTotalHotArchiveEntries -
+ (hotArchiveBatchSize * hotArchiveBatchCount)) /
+ cfg.APPLY_LOAD_BL_LAST_BATCH_LEDGERS)
: 0;
CLOG_INFO(Perf,
@@ -838,23 +1013,126 @@ ApplyLoad::setupBucketList()
}
void
-ApplyLoad::benchmark()
+ApplyLoad::benchmarkLimits()
{
- releaseAssertOrThrow(mMode != ApplyLoadMode::MAX_SAC_TPS);
+ auto& ledgerClose =
+ mApp.getMetrics().NewTimer({"ledger", "ledger", "close"});
+ ledgerClose.Clear();
+
+ auto& cpuInsRatio = mApp.getMetrics().NewHistogram(
+ {"soroban", "host-fn-op", "invoke-time-fsecs-cpu-insn-ratio"});
+ cpuInsRatio.Clear();
+
+ auto& cpuInsRatioExclVm = mApp.getMetrics().NewHistogram(
+ {"soroban", "host-fn-op", "invoke-time-fsecs-cpu-insn-ratio-excl-vm"});
+ cpuInsRatioExclVm.Clear();
+
+ auto& ledgerCpuInsRatio = mApp.getMetrics().NewHistogram(
+ {"soroban", "host-fn-op", "ledger-cpu-insns-ratio"});
+ ledgerCpuInsRatio.Clear();
+
+ auto& ledgerCpuInsRatioExclVm = mApp.getMetrics().NewHistogram(
+ {"soroban", "host-fn-op", "ledger-cpu-insns-ratio-excl-vm"});
+ ledgerCpuInsRatioExclVm.Clear();
+
+ auto& totalTxApplyTime =
+ mApp.getMetrics().NewTimer({"ledger", "transaction", "total-apply"});
+ totalTxApplyTime.Clear();
+
+ for (size_t i = 0; i < mApp.getConfig().APPLY_LOAD_NUM_LEDGERS; ++i)
+ {
+ benchmarkLimitsIteration();
+ }
+ CLOG_INFO(Perf,
+ "Ledger close min/avg/max: {}/{}/{} milliseconds "
+ "(stddev={})",
+ ledgerClose.min(), ledgerClose.mean(), ledgerClose.max(),
+ ledgerClose.std_dev());
+ CLOG_INFO(Perf,
+ "Tx apply time min/avg/max: {}/{}/{} milliseconds "
+ "(stddev={})",
+ totalTxApplyTime.min(), totalTxApplyTime.mean(),
+ totalTxApplyTime.max(), totalTxApplyTime.std_dev());
+
+ CLOG_INFO(Perf, "Max CPU ins ratio: {}", cpuInsRatio.max() / 1000000);
+ CLOG_INFO(Perf, "Mean CPU ins ratio: {}", cpuInsRatio.mean() / 1000000);
+
+ CLOG_INFO(Perf, "Max CPU ins ratio excl VM: {}",
+ cpuInsRatioExclVm.max() / 1000000);
+ CLOG_INFO(Perf, "Mean CPU ins ratio excl VM: {}",
+ cpuInsRatioExclVm.mean() / 1000000);
+ CLOG_INFO(Perf, "stddev CPU ins ratio excl VM: {}",
+ cpuInsRatioExclVm.std_dev() / 1000000);
+
+ CLOG_INFO(Perf, "Ledger Max CPU ins ratio: {}",
+ ledgerCpuInsRatio.max() / 1000000);
+ CLOG_INFO(Perf, "Ledger Mean CPU ins ratio: {}",
+ ledgerCpuInsRatio.mean() / 1000000);
+ CLOG_INFO(Perf, "Ledger stddev CPU ins ratio: {}",
+ ledgerCpuInsRatio.std_dev() / 1000000);
+
+ CLOG_INFO(Perf, "Ledger Max CPU ins ratio excl VM: {}",
+ ledgerCpuInsRatioExclVm.max() / 1000000);
+ CLOG_INFO(Perf, "Ledger Mean CPU ins ratio excl VM: {}",
+ ledgerCpuInsRatioExclVm.mean() / 1000000);
+ CLOG_INFO(Perf, "Ledger stddev CPU ins ratio excl VM: {} milliseconds",
+ ledgerCpuInsRatioExclVm.std_dev() / 1000000);
+ CLOG_INFO(Perf, "Tx count utilization min/avg/max {}/{}/{}%",
+ getTxCountUtilization().min() / 1000.0,
+ getTxCountUtilization().mean() / 1000.0,
+ getTxCountUtilization().max() / 1000.0);
+ CLOG_INFO(Perf, "Instruction utilization min/avg/max {}/{}/{}%",
+ getInstructionUtilization().min() / 1000.0,
+ getInstructionUtilization().mean() / 1000.0,
+ getInstructionUtilization().max() / 1000.0);
+ CLOG_INFO(Perf, "Tx size utilization min/avg/max {}/{}/{}%",
+ getTxSizeUtilization().min() / 1000.0,
+ getTxSizeUtilization().mean() / 1000.0,
+ getTxSizeUtilization().max() / 1000.0);
+ CLOG_INFO(Perf, "Disk read bytes utilization min/avg/max {}/{}/{}%",
+ getDiskReadByteUtilization().min() / 1000.0,
+ getDiskReadByteUtilization().mean() / 1000.0,
+ getDiskReadByteUtilization().max() / 1000.0);
+ CLOG_INFO(Perf, "Write bytes utilization min/avg/max {}/{}/{}%",
+ getDiskWriteByteUtilization().min() / 1000.0,
+ getDiskWriteByteUtilization().mean() / 1000.0,
+ getDiskWriteByteUtilization().max() / 1000.0);
+ CLOG_INFO(Perf, "Disk read entry utilization min/avg/max {}/{}/{}%",
+ getDiskReadEntryUtilization().min() / 1000.0,
+ getDiskReadEntryUtilization().mean() / 1000.0,
+ getDiskReadEntryUtilization().max() / 1000.0);
+ CLOG_INFO(Perf, "Write entry utilization min/avg/max {}/{}/{}%",
+ getWriteEntryUtilization().min() / 1000.0,
+ getWriteEntryUtilization().mean() / 1000.0,
+ getWriteEntryUtilization().max() / 1000.0);
+
+ CLOG_INFO(Perf, "Tx Success Rate: {:f}%", successRate() * 100);
+}
+
+void
+ApplyLoad::benchmarkLimitsIteration()
+{
+ releaseAssert(mMode != ApplyLoadMode::MAX_SAC_TPS);
+
+ mApp.getBucketManager().getLiveBucketList().resolveAllFutures();
+ releaseAssert(
+ mApp.getBucketManager().getLiveBucketList().futuresAllResolved());
auto& lm = mApp.getLedgerManager();
auto const& config = mApp.getConfig();
std::vector txs;
auto maxResourcesToGenerate = lm.maxLedgerResources(true);
- // The TxSet validation will compare the ledger instruction limit against
- // the sum of the instructions of the slowest cluster in each stage, so we
- // just multiply the instructions limit by the max number of clusters.
+ // The TxSet validation will compare the ledger instruction limit
+ // against the sum of the instructions of the slowest cluster in each
+ // stage, so we just multiply the instructions limit by the max number
+ // of clusters.
maxResourcesToGenerate.setVal(
Resource::Type::INSTRUCTIONS,
maxResourcesToGenerate.getVal(Resource::Type::INSTRUCTIONS) *
config.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS);
- // Scale the resources by the tx queue multipler to emulate filled mempool.
+    // Scale the resources by the tx queue multiplier to emulate filled
+ // mempool.
maxResourcesToGenerate =
multiplyByDouble(maxResourcesToGenerate,
config.SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER);
@@ -864,7 +1142,8 @@ ApplyLoad::benchmark()
auto resourcesLeft = maxResourcesToGenerate;
auto const& accounts = mTxGenerator.getAccounts();
- std::vector shuffledAccounts(accounts.size());
+ // Omit root account
+ std::vector shuffledAccounts(accounts.size() - 1);
std::iota(shuffledAccounts.begin(), shuffledAccounts.end(), 0);
stellar::shuffle(std::begin(shuffledAccounts), std::end(shuffledAccounts),
getGlobalRandomEngine());
@@ -942,6 +1221,130 @@ ApplyLoad::benchmark()
closeLedger(txs, {}, /* recordSorobanUtilization */ true);
}
+void
+ApplyLoad::findMaxLimitsForModelTransaction()
+{
+ auto const& config = mApp.getConfig();
+
+ auto validateTxParam = [&config](std::string const& paramName,
+ auto const& values, auto const& weights,
+ bool allowZeroValue = false) {
+ if (values.size() != 1)
+ {
+ throw std::runtime_error(
+ fmt::format(FMT_STRING("{} must have exactly one entry for "
+ "'limits-for-model-tx' mode"),
+ paramName));
+ }
+ if (!allowZeroValue && values[0] == 0)
+ {
+ throw std::runtime_error(fmt::format(
+ FMT_STRING("{} cannot be zero for 'limits-for-model-tx' mode"),
+ paramName));
+ }
+ if (weights.size() != 1 || weights[0] != 1)
+ {
+ throw std::runtime_error(
+ fmt::format(FMT_STRING("{}_DISTRIBUTION must have exactly one "
+ "entry with the value of 1 for "
+ "'limits-for-model-tx' mode"),
+ paramName));
+ }
+ };
+ validateTxParam("APPLY_LOAD_INSTRUCTIONS", config.APPLY_LOAD_INSTRUCTIONS,
+ config.APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION);
+ validateTxParam("APPLY_LOAD_TX_SIZE_BYTES", config.APPLY_LOAD_TX_SIZE_BYTES,
+ config.APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION);
+ validateTxParam("APPLY_LOAD_NUM_DISK_READ_ENTRIES",
+ config.APPLY_LOAD_NUM_DISK_READ_ENTRIES,
+ config.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION, true);
+ validateTxParam("APPLY_LOAD_NUM_RW_ENTRIES",
+ config.APPLY_LOAD_NUM_RW_ENTRIES,
+ config.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION);
+ validateTxParam("APPLY_LOAD_EVENT_COUNT", config.APPLY_LOAD_EVENT_COUNT,
+ config.APPLY_LOAD_EVENT_COUNT_DISTRIBUTION, true);
+
+ auto roundDown = [](uint64_t value, uint64_t step) {
+ return value - value % step;
+ };
+
+ auto& ledgerCloseTime =
+ mApp.getMetrics().NewTimer({"ledger", "ledger", "close"});
+
+ uint64_t minTxsPerLedger = 1;
+ uint64_t maxTxsPerLedger = mApp.getConfig().APPLY_LOAD_MAX_SOROBAN_TX_COUNT;
+ SorobanUpgradeConfig maxLimitsConfig;
+ uint64_t maxLimitsTxsPerLedger = 0;
+ uint64_t prevTxsPerLedger = 0;
+
+ double targetTimeMs = mApp.getConfig().APPLY_LOAD_TARGET_CLOSE_TIME_MS;
+
+ while (minTxsPerLedger <= maxTxsPerLedger)
+ {
+ uint64_t testTxsPerLedger = (minTxsPerLedger + maxTxsPerLedger) / 2;
+
+ CLOG_INFO(Perf,
+ "Testing ledger max model txs: {}, generated limits: "
+ "instructions {}, tx size {}, disk read entries {}, rw "
+ "entries {}",
+ testTxsPerLedger,
+ testTxsPerLedger * config.APPLY_LOAD_INSTRUCTIONS[0],
+ testTxsPerLedger * config.APPLY_LOAD_TX_SIZE_BYTES[0],
+ testTxsPerLedger * config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0],
+ testTxsPerLedger * config.APPLY_LOAD_NUM_RW_ENTRIES[0]);
+ auto [upgradeConfig, actualMaxTxsPerLedger] =
+ updateSettingsForTxCount(testTxsPerLedger);
+ // Break when due to rounding we've arrived at the same actual txs to
+ // test as in the previous iteration, or at the value lower than the
+ // best found so far.
+ if (actualMaxTxsPerLedger == prevTxsPerLedger ||
+ actualMaxTxsPerLedger <= maxLimitsTxsPerLedger)
+ {
+ CLOG_INFO(Perf, "No change in generated limits after update due to "
+ "rounding, ending search.");
+ break;
+ }
+ applyConfigUpgrade(upgradeConfig);
+
+ prevTxsPerLedger = actualMaxTxsPerLedger;
+ ledgerCloseTime.Clear();
+ for (size_t i = 0; i < mApp.getConfig().APPLY_LOAD_NUM_LEDGERS; ++i)
+ {
+ benchmarkLimitsIteration();
+ }
+ releaseAssert(successRate() == 1.0);
+ if (ledgerCloseTime.mean() > targetTimeMs)
+ {
+ CLOG_INFO(
+ Perf,
+ "Failed: {} model txs per ledger (avg close time: {:.2f}ms)",
+ actualMaxTxsPerLedger, ledgerCloseTime.mean());
+ maxTxsPerLedger = testTxsPerLedger - 1;
+ }
+ else
+ {
+ CLOG_INFO(Perf,
+ "Success: {} model txs per ledger (avg close time: "
+ "{:.2f}ms)",
+ actualMaxTxsPerLedger, ledgerCloseTime.mean());
+ minTxsPerLedger = testTxsPerLedger + 1;
+ maxLimitsTxsPerLedger = actualMaxTxsPerLedger;
+ maxLimitsConfig = upgradeConfig;
+ }
+ }
+ CLOG_INFO(Perf,
+ "Maximum limits found for model transaction ({} TPL): "
+ "instructions {}, "
+ "tx size {}, disk read entries {}, disk read bytes {}, "
+ "write entries {}, write bytes {}",
+ maxLimitsTxsPerLedger, *maxLimitsConfig.ledgerMaxInstructions,
+ *maxLimitsConfig.ledgerMaxTransactionsSizeBytes,
+ *maxLimitsConfig.ledgerMaxDiskReadEntries,
+ *maxLimitsConfig.ledgerMaxDiskReadBytes,
+ *maxLimitsConfig.ledgerMaxWriteLedgerEntries,
+ *maxLimitsConfig.ledgerMaxWriteBytes);
+}
+
double
ApplyLoad::successRate()
{
@@ -1018,8 +1421,8 @@ ApplyLoad::findMaxSacTps()
std::ceil(static_cast(MIN_TXS_PER_STEP) / txsPerStep) *
txsPerStep;
}
- uint32_t stepsPerSecond = 1000 / ApplyLoad::MAX_SAC_TPS_TIME_STEP_MS;
- // Round min and max rate of txs per step of MAX_SAC_TPS_TIME_STEP_MS
+ uint32_t stepsPerSecond = 1000 / ApplyLoad::TARGET_CLOSE_TIME_STEP_MS;
+ // Round min and max rate of txs per step of TARGET_CLOSE_TIME_STEP_MS
// duration to be multiple of txsPerStep.
uint32_t minTxRateSteps =
std::max(1u, mApp.getConfig().APPLY_LOAD_MAX_SAC_TPS_MIN_TPS /
@@ -1029,11 +1432,10 @@ ApplyLoad::findMaxSacTps()
stepsPerSecond / txsPerStep);
uint32_t bestTps = 0;
- double targetCloseTimeMs =
- mApp.getConfig().APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS;
+ double targetCloseTimeMs = mApp.getConfig().APPLY_LOAD_TARGET_CLOSE_TIME_MS;
uint32_t targetCloseTimeSteps =
- mApp.getConfig().APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS /
- ApplyLoad::MAX_SAC_TPS_TIME_STEP_MS;
+ mApp.getConfig().APPLY_LOAD_TARGET_CLOSE_TIME_MS /
+ ApplyLoad::TARGET_CLOSE_TIME_STEP_MS;
auto txsPerLedgerToTPS =
[targetCloseTimeMs](uint32_t txsPerLedger) -> uint32_t {
diff --git a/src/simulation/ApplyLoad.h b/src/simulation/ApplyLoad.h
index 06cfdf3998..fa460e9590 100644
--- a/src/simulation/ApplyLoad.h
+++ b/src/simulation/ApplyLoad.h
@@ -17,6 +17,8 @@ enum class ApplyLoadMode
{
// Generate load within the configured ledger limits.
LIMIT_BASED,
+ // Generate load that finds max ledger limits for the 'model' transaction.
+ FIND_LIMITS_FOR_MODEL_TX,
// Generate load that only finds max TPS for the cheap operations (SAC
// transfers), ignoring ledger limits.
MAX_SAC_TPS
@@ -25,33 +27,23 @@ enum class ApplyLoadMode
class ApplyLoad
{
public:
- ApplyLoad(Application& app,
- ApplyLoadMode mode = ApplyLoadMode::LIMIT_BASED);
+ ApplyLoad(Application& app, ApplyLoadMode mode);
- void closeLedger(std::vector const& txs,
- xdr::xvector const& upgrades = {},
- bool recordSorobanUtilization = false);
-
- // Fills up a list of transactions with
- // SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER * the max ledger resources
- // specified in the ApplyLoad constructor, create a TransactionSet out of
- // those transactions, and then close a ledger with that TransactionSet. The
- // generated transactions are generated using the LOADGEN_* config
- // parameters.
- void benchmark();
-
- // Generates SAC transactions and times just the application phase (fee and
- // sequence number processing, tx execution, and post process, but no disk
- // writes). This will do a binary search from APPLY_LOAD_MAX_SAC_TPS_MIN_TPS
- // to APPLY_LOAD_MAX_SAC_TPS_MAX_TPS, attempting to find the largest
- // transaction set we can execute in under
- // APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS.
- void findMaxSacTps();
+ // Execute the benchmark according to the mode specified in the constructor.
+ void execute();
// Returns the % of transactions that succeeded during apply time. The range
// of values is [0,1.0].
double successRate();
+ // Closes a ledger with the given transactions and optional upgrades.
+ // `recordSorobanUtilization` indicates whether to record utilization of
+ // Soroban resources in the transaction set; this should only be necessary for
+ // the benchmark runs.
+ void closeLedger(std::vector const& txs,
+ xdr::xvector const& upgrades = {},
+ bool recordSorobanUtilization = false);
+
// These metrics track what percentage of available resources were used when
// creating the list of transactions in benchmark().
// Histogram uses integers, so the values are scaled up by 100,000
@@ -66,11 +58,12 @@ class ApplyLoad
// Returns LedgerKey for pre-populated archived state at the given index.
static LedgerKey getKeyForArchivedEntry(uint64_t index);
- static uint32_t calculateRequiredHotArchiveEntries(Config const& cfg);
+ static uint32_t calculateRequiredHotArchiveEntries(ApplyLoadMode mode,
+ Config const& cfg);
// The target time to close a ledger when running in MAX_SAC_TPS mode must
- // be a multiple of MAX_SAC_TPS_TIME_STEP_MS.
- static uint32_t const MAX_SAC_TPS_TIME_STEP_MS = 50;
+ // be a multiple of TARGET_CLOSE_TIME_STEP_MS.
+ static uint32_t const TARGET_CLOSE_TIME_STEP_MS = 50;
private:
void setup();
@@ -82,10 +75,45 @@ class ApplyLoad
void setupBatchTransferContracts();
void setupBucketList();
+ // Runs for `execute()` in `ApplyLoadMode::LIMIT_BASED` mode.
+ // Runs APPLY_LOAD_NUM_LEDGERS iterations of `benchmarkLimitsIteration` and
+ // outputs the measured ledger close time metrics, as well as some other
+ // support metrics.
+ void benchmarkLimits();
+
+ // Runs for `execute()` in `ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX` mode.
+ // Generates transactions according to the 'model' transaction parameters
+ // (specified via the transaction generation config), and does a binary
+ // search for the maximum number of such transactions that can fit into
+ // ledger while not exceeding APPLY_LOAD_TARGET_CLOSE_TIME_MS ledger close
+ // time.
+ // After finding the maximum number of model transactions, outputs the
+ // respective ledger limits.
+ // This also performs some rounding on the ledger limits to make the binary
+ // search faster, and also to produce more readable limits.
+ void findMaxLimitsForModelTransaction();
+
+ // Runs for `execute()` in `ApplyLoadMode::MAX_SAC_TPS` mode.
+ // Generates SAC transactions and times just the application phase (fee and
+ // sequence number processing, tx execution, and post process, but no disk
+ // writes). This will do a binary search from APPLY_LOAD_MAX_SAC_TPS_MIN_TPS
+ // to APPLY_LOAD_MAX_SAC_TPS_MAX_TPS, attempting to find the largest
+ // transaction set we can execute in under
+ // APPLY_LOAD_TARGET_CLOSE_TIME_MS.
+ void findMaxSacTps();
+
// Run iterations at the given TPS. Reports average time over all runs, in
// milliseconds.
double benchmarkSacTps(uint32_t targetTps);
+ // Fills up a list of transactions with
+ // SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER * the max ledger resources
+ // specified in the ApplyLoad constructor, create a TransactionSet out of
+ // those transactions, and then close a ledger with that TransactionSet. The
+ // generated transactions are generated using the LOADGEN_* config
+ // parameters.
+ void benchmarkLimitsIteration();
+
// Generates the given number of native asset SAC payment TXs with no
// conflicts.
void generateSacPayments(std::vector& txs,
@@ -108,20 +136,17 @@ class ApplyLoad
// Helper method to apply a config upgrade
void applyConfigUpgrade(SorobanUpgradeConfig const& upgradeConfig);
- LedgerKey mUpgradeCodeKey;
- LedgerKey mUpgradeInstanceKey;
-
- LedgerKey mLoadCodeKey;
- // Used to generate soroban load transactions
- TxGenerator::ContractInstance mLoadInstance;
- // Used to generate XLM payments
- TxGenerator::ContractInstance mSACInstanceXLM;
- // Used for batch transfers, one instance for each cluster
- std::vector mBatchTransferInstances;
- size_t mDataEntryCount = 0;
- size_t mDataEntrySize = 0;
+ // Updates the configuration settings in such a way as to accommodate around
+ // `txsPerLedger` 'model' transactions per ledger for the
+ // `FIND_LIMITS_FOR_MODEL_TX` mode.
+ // Returns the network configuration to use for upgrade and the actual
+ // number of transactions that can fit within the limits (it may be
+ // slightly lower than `txsPerLedger` due to rounding).
+ std::pair
+ updateSettingsForTxCount(uint64_t txsPerLedger);
Application& mApp;
+ ApplyLoadMode mMode;
TxGenerator::TestAccountPtr mRoot;
uint32_t mNumAccounts;
@@ -135,9 +160,21 @@ class ApplyLoad
medida::Histogram& mDiskReadEntryUtilization;
medida::Histogram& mWriteEntryUtilization;
- ApplyLoadMode mMode;
TxGenerator mTxGenerator;
+ LedgerKey mUpgradeCodeKey;
+ LedgerKey mUpgradeInstanceKey;
+
+ LedgerKey mLoadCodeKey;
+ // Used to generate soroban load transactions
+ TxGenerator::ContractInstance mLoadInstance;
+ // Used to generate XLM payments
+ TxGenerator::ContractInstance mSACInstanceXLM;
+ // Used for batch transfers, one instance for each cluster
+ std::vector mBatchTransferInstances;
+ size_t mDataEntryCount = 0;
+ size_t mDataEntrySize = 0;
+
// Counter for generating unique destination addresses for SAC payments
uint32_t mDestCounter = 0;
};
diff --git a/src/simulation/TxGenerator.cpp b/src/simulation/TxGenerator.cpp
index 496051e22c..c909218edf 100644
--- a/src/simulation/TxGenerator.cpp
+++ b/src/simulation/TxGenerator.cpp
@@ -559,7 +559,7 @@ TxGenerator::invokeSorobanLoadTransactionV2(
// functions (maybe with a small constant factor as well).
uint32_t const baseInstructionCount = 737'119;
uint32_t const baselineTxSizeBytes = 256;
- uint32_t const eventSize = 80;
+ uint32_t const eventSize = TxGenerator::SOROBAN_LOAD_V2_EVENT_SIZE_BYTES;
uint32_t const instructionsPerGuestCycle = 40;
uint32_t const instructionsPerHostCycle = 4'875;
uint32_t const instructionsPerAuthByte = 35;
@@ -656,6 +656,9 @@ TxGenerator::invokeSorobanLoadTransactionV2(
uint32_t targetInstructions =
sampleDiscrete(appCfg.APPLY_LOAD_INSTRUCTIONS,
appCfg.APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION, 0u);
+ resources.instructions = targetInstructions;
+ resources.writeBytes = entriesWriteSize;
+ resources.diskReadBytes = dataEntrySize * archiveEntriesToRestore;
auto numEntries =
(rwEntries + archiveEntriesToRestore + instance.readOnlyKeys.size());
@@ -707,15 +710,8 @@ TxGenerator::invokeSorobanLoadTransactionV2(
ihf.invokeContract().args = {makeU32(guestCycles), makeU32(hostCycles),
makeU32(eventCount)};
- resources.writeBytes = entriesWriteSize;
- resources.diskReadBytes = dataEntrySize * archiveEntriesToRestore;
-
increaseOpSize(op, paddingBytes);
- resources.instructions = instructionsWithoutCpuLoad +
- hostCycles * instructionsPerHostCycle +
- guestCycles * instructionsPerGuestCycle;
-
auto resourceFee =
sorobanResourceFee(mApp, resources, txOverheadBytes + paddingBytes,
eventSize * eventCount);
diff --git a/src/simulation/TxGenerator.h b/src/simulation/TxGenerator.h
index b2261177fa..9de4887a4e 100644
--- a/src/simulation/TxGenerator.h
+++ b/src/simulation/TxGenerator.h
@@ -95,6 +95,7 @@ class TxGenerator
// Instructions per SAC transaction
static constexpr uint64_t SAC_TX_INSTRUCTIONS = 250'000;
static constexpr uint64_t BATCH_TRANSFER_TX_INSTRUCTIONS = 500'000;
+ static constexpr uint32_t SOROBAN_LOAD_V2_EVENT_SIZE_BYTES = 80;
// Special account ID to represent the root account
static uint64_t const ROOT_ACCOUNT_ID;
diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp
index a3ca6eacf3..849f44c265 100644
--- a/src/simulation/test/LoadGeneratorTests.cpp
+++ b/src/simulation/test/LoadGeneratorTests.cpp
@@ -884,6 +884,7 @@ TEST_CASE("apply load", "[loadgen][applyload][acceptance]")
cfg.USE_CONFIG_FOR_GENESIS = true;
cfg.LEDGER_PROTOCOL_VERSION = Config::CURRENT_LEDGER_PROTOCOL_VERSION;
cfg.MANUAL_CLOSE = true;
+ cfg.ENABLE_SOROBAN_DIAGNOSTIC_EVENTS = false;
cfg.APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 100;
@@ -936,18 +937,19 @@ TEST_CASE("apply load", "[loadgen][applyload][acceptance]")
cfg.APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = 8198;
cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 50;
- cfg.APPLY_LOAD_NUM_LEDGERS = 100;
+ cfg.APPLY_LOAD_NUM_LEDGERS = 10;
cfg.ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING = true;
VirtualClock clock(VirtualClock::REAL_TIME);
auto app = createTestApplication(clock, cfg);
- ApplyLoad al(*app);
+ ApplyLoad al(*app, ApplyLoadMode::LIMIT_BASED);
// Sample a few indices to verify hot archive is properly initialized
uint32_t expectedArchivedEntries =
- ApplyLoad::calculateRequiredHotArchiveEntries(cfg);
+ ApplyLoad::calculateRequiredHotArchiveEntries(
+ ApplyLoadMode::LIMIT_BASED, cfg);
std::vector sampleIndices = {0, expectedArchivedEntries / 2,
expectedArchivedEntries - 1};
std::set sampleKeys;
@@ -964,68 +966,71 @@ TEST_CASE("apply load", "[loadgen][applyload][acceptance]")
auto sampleEntries = hotArchive->loadKeys(sampleKeys);
REQUIRE(sampleEntries.size() == sampleKeys.size());
- auto& ledgerClose =
- app->getMetrics().NewTimer({"ledger", "ledger", "close"});
- ledgerClose.Clear();
+ al.execute();
- auto& cpuInsRatio = app->getMetrics().NewHistogram(
- {"soroban", "host-fn-op", "invoke-time-fsecs-cpu-insn-ratio"});
- cpuInsRatio.Clear();
+ REQUIRE(1.0 - al.successRate() < std::numeric_limits::epsilon());
+}
- auto& cpuInsRatioExclVm = app->getMetrics().NewHistogram(
- {"soroban", "host-fn-op", "invoke-time-fsecs-cpu-insn-ratio-excl-vm"});
- cpuInsRatioExclVm.Clear();
+TEST_CASE("apply load find max limits for model tx",
+ "[loadgen][applyload][acceptance]")
+{
+ auto cfg = getTestConfig();
+ cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 1000;
+ cfg.USE_CONFIG_FOR_GENESIS = true;
+ cfg.LEDGER_PROTOCOL_VERSION = Config::CURRENT_LEDGER_PROTOCOL_VERSION;
+ cfg.MANUAL_CLOSE = true;
+ cfg.ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING = true;
- auto& declaredInsnsUsageRatio = app->getMetrics().NewHistogram(
- {"soroban", "host-fn-op", "declared-cpu-insns-usage-ratio"});
- declaredInsnsUsageRatio.Clear();
+ // Also generate that many classic simple payments.
+ cfg.APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 100;
- for (size_t i = 0; i < cfg.APPLY_LOAD_NUM_LEDGERS; ++i)
- {
- app->getBucketManager().getLiveBucketList().resolveAllFutures();
- releaseAssert(
- app->getBucketManager().getLiveBucketList().futuresAllResolved());
+ // Close 3 ledgers per iteration.
+ cfg.APPLY_LOAD_NUM_LEDGERS = 3;
+ // The target close time is 500ms.
+ cfg.APPLY_LOAD_TARGET_CLOSE_TIME_MS = 500;
+
+ // Size of each data entry to be used in the test.
+ cfg.APPLY_LOAD_DATA_ENTRY_SIZE = 100;
+
+ // BL generation parameters
+ cfg.APPLY_LOAD_BL_SIMULATED_LEDGERS = 1000;
+ cfg.APPLY_LOAD_BL_WRITE_FREQUENCY = 1000;
+ cfg.APPLY_LOAD_BL_BATCH_SIZE = 1000;
+ cfg.APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300;
+ cfg.APPLY_LOAD_BL_LAST_BATCH_SIZE = 100;
+
+ // Load generation parameters
+ cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES = {1};
+ cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = {1};
+
+ cfg.APPLY_LOAD_NUM_RW_ENTRIES = {4};
+ cfg.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = {1};
+
+ cfg.APPLY_LOAD_EVENT_COUNT = {2};
+ cfg.APPLY_LOAD_EVENT_COUNT_DISTRIBUTION = {1};
+
+ cfg.APPLY_LOAD_TX_SIZE_BYTES = {1000};
+ cfg.APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION = {1};
+
+ cfg.APPLY_LOAD_INSTRUCTIONS = {2'000'000};
+ cfg.APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION = {1};
+
+ // Only a few ledger limits need to be specified, the rest will be found by
+ // the benchmark itself.
+ // Number of soroban txs per ledger is the upper bound of the binary
+ // search for the number of the model txs to include in each ledger.
+ cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 1000;
+ // Use 2 clusters/threads.
+ cfg.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 2;
+
+ VirtualClock clock(VirtualClock::REAL_TIME);
+ auto app = createTestApplication(clock, cfg);
+
+ ApplyLoad al(*app, ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX);
+
+ al.execute();
- al.benchmark();
- }
REQUIRE(1.0 - al.successRate() < std::numeric_limits::epsilon());
- CLOG_INFO(Perf, "Max ledger close: {} milliseconds", ledgerClose.max());
- CLOG_INFO(Perf, "Min ledger close: {} milliseconds", ledgerClose.min());
- CLOG_INFO(Perf, "Mean ledger close: {} milliseconds", ledgerClose.mean());
- CLOG_INFO(Perf, "stddev ledger close: {} milliseconds",
- ledgerClose.std_dev());
-
- CLOG_INFO(Perf, "Max CPU ins ratio: {}", cpuInsRatio.max() / 1000000);
- CLOG_INFO(Perf, "Mean CPU ins ratio: {}", cpuInsRatio.mean() / 1000000);
-
- CLOG_INFO(Perf, "Max CPU ins ratio excl VM: {}",
- cpuInsRatioExclVm.max() / 1000000);
- CLOG_INFO(Perf, "Mean CPU ins ratio excl VM: {}",
- cpuInsRatioExclVm.mean() / 1000000);
- CLOG_INFO(Perf, "stddev CPU ins ratio excl VM: {}",
- cpuInsRatioExclVm.std_dev() / 1000000);
-
- CLOG_INFO(Perf, "Min CPU declared insns ratio: {}",
- declaredInsnsUsageRatio.min() / 1000000.0);
- CLOG_INFO(Perf, "Mean CPU declared insns ratio: {}",
- declaredInsnsUsageRatio.mean() / 1000000.0);
- CLOG_INFO(Perf, "stddev CPU declared insns ratio: {}",
- declaredInsnsUsageRatio.std_dev() / 1000000.0);
-
- CLOG_INFO(Perf, "Tx count utilization {}%",
- al.getTxCountUtilization().mean() / 1000.0);
- CLOG_INFO(Perf, "Instruction utilization {}%",
- al.getInstructionUtilization().mean() / 1000.0);
- CLOG_INFO(Perf, "Tx size utilization {}%",
- al.getTxSizeUtilization().mean() / 1000.0);
- CLOG_INFO(Perf, "Read bytes utilization {}%",
- al.getDiskReadByteUtilization().mean() / 1000.0);
- CLOG_INFO(Perf, "Write bytes utilization {}%",
- al.getDiskWriteByteUtilization().mean() / 1000.0);
- CLOG_INFO(Perf, "Read entry utilization {}%",
- al.getDiskReadEntryUtilization().mean() / 1000.0);
- CLOG_INFO(Perf, "Write entry utilization {}%",
- al.getWriteEntryUtilization().mean() / 1000.0);
}
TEST_CASE("basic MAX_SAC_TPS functionality",
@@ -1039,7 +1044,7 @@ TEST_CASE("basic MAX_SAC_TPS functionality",
cfg.IGNORE_MESSAGE_LIMITS_FOR_TESTING = true;
// Configure test parameters for MAX_SAC_TPS mode
- cfg.APPLY_LOAD_MAX_SAC_TPS_TARGET_CLOSE_TIME_MS = 1500;
+ cfg.APPLY_LOAD_TARGET_CLOSE_TIME_MS = 1500;
cfg.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 2;
cfg.APPLY_LOAD_MAX_SAC_TPS_MIN_TPS = 1;
cfg.APPLY_LOAD_MAX_SAC_TPS_MAX_TPS = 1000;
@@ -1052,7 +1057,7 @@ TEST_CASE("basic MAX_SAC_TPS functionality",
ApplyLoad al(*app, ApplyLoadMode::MAX_SAC_TPS);
// Run the MAX_SAC_TPS test
- al.findMaxSacTps();
+ al.execute();
// Verify that we actually applied something in parallel
auto& maxClustersMetric = app->getMetrics().NewCounter(