From e657465aa6f705b5b0f94127698b878b352e4279 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Fri, 27 Jun 2025 15:31:32 +0200 Subject: [PATCH 01/10] iteratively calc Maximum Sustainable Throughput --- .../tx_pool_throughput_analysis/config.go | 8 +- .../tasks/tx_pool_throughput_analysis/task.go | 91 ++++++++++++------- 2 files changed, 66 insertions(+), 33 deletions(-) diff --git a/pkg/coordinator/tasks/tx_pool_throughput_analysis/config.go b/pkg/coordinator/tasks/tx_pool_throughput_analysis/config.go index 3d9c6521..2164972e 100644 --- a/pkg/coordinator/tasks/tx_pool_throughput_analysis/config.go +++ b/pkg/coordinator/tasks/tx_pool_throughput_analysis/config.go @@ -3,7 +3,9 @@ package txpoolthroughputanalysis type Config struct { PrivateKey string `yaml:"privateKey" json:"privateKey"` - TPS int `yaml:"tps" json:"tps"` + StartingTPS int `yaml:"tps" json:"tps"` + EndingTPS int `yaml:"endingTps" json:"endingTps"` + IncrementTPS int `yaml:"incrementTps" json:"incrementTps"` DurationS int `yaml:"durationS" json:"durationS"` LogInterval int `yaml:"logInterval" json:"logInterval"` SecondsBeforeRunning int `yaml:"secondsBeforeRunning" json:"secondsBeforeRunning"` @@ -11,7 +13,9 @@ type Config struct { func DefaultConfig() Config { return Config{ - TPS: 100, + StartingTPS: 100, + EndingTPS: 1000, + IncrementTPS: 100, DurationS: 60, LogInterval: 100, SecondsBeforeRunning: 0, diff --git a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go index b1d7d3e9..2b3551f4 100644 --- a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go +++ b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go @@ -25,6 +25,11 @@ var ( } ) +type ThroughoutMeasure struct { + LoadTPS int `json:"load_tps"` + ProcessedTPS int `json:"processed_tps"` +} + type Task struct { ctx *types.TaskContext options *types.TaskOptions @@ -102,8 +107,8 @@ func (t *Task) Execute(ctx context.Context) error { client := executionClients[n.Int64()] t.logger.Infof("Measuring TxPool transaction propagation *throughput*") - t.logger.Infof("Targeting client: %s, TPS: %d, Duration: %d seconds", - client.GetName(), t.config.TPS, t.config.DurationS) + t.logger.Infof("Targeting client: %s, Starting TPS: %d, Ending TPS: %d, Increment TPS: %d, Duration: %d seconds", + client.GetName(), t.config.StartingTPS, t.config.EndingTPS, t.config.IncrementTPS, t.config.DurationS) // Wait for the specified seconds before starting the task if t.config.SecondsBeforeRunning > 0 { @@ -117,11 +122,53 @@ func (t *Task) Execute(ctx context.Context) error { } } - // Prepare to collect transaction latencies - testDeadline := time.Now().Add(time.Duration(t.config.DurationS+60*30) * time.Second) - + // Create a new load target for the transaction propagation measurement loadTarget := txloadtool.NewLoadTarget(ctx, t.ctx, t.logger, t.wallet, client) - load := txloadtool.NewLoad(loadTarget, t.config.TPS, t.config.DurationS, testDeadline, t.config.LogInterval) + + percentile := 0.95 + singleMeasureDeadline := time.Now().Add(time.Duration(t.config.DurationS+60*30) * time.Second) + + // slice of pairs: sending tps, processed TPS values + var throughoutMeasures []ThroughoutMeasure + + // Iterate over the TPS range and crate a plot processedTps vs sendingTps + for sendingTps := t.config.StartingTPS; sendingTps <= t.config.EndingTPS; sendingTps += t.config.IncrementTPS { + + // measure the throughput with the current sendingTps + processedTps, err := t.measureTpsWithLoad(loadTarget, sendingTps, 
t.config.DurationS, percentile, singleMeasureDeadline) + if err != nil { + t.logger.Errorf("Error during throughput measurement with sendingTps=%d, duration=%d: %v", sendingTps, t.config.DurationS, err) + t.ctx.SetResult(types.TaskResultFailure) + return err + } + + // add to throughoutMeasures + throughoutMeasures = append(throughoutMeasures, ThroughoutMeasure{ + LoadTPS: sendingTps, + ProcessedTPS: processedTps, + }) + } + + t.ctx.Outputs.SetVar("throughput_measures", throughoutMeasures) + // todo: log coordinated_omission_event_count and missed_p2p_event_count? + + t.ctx.SetResult(types.TaskResultSuccess) + + outputs := map[string]interface{}{ + "throughput_measures": throughoutMeasures, + } + + outputsJSON, _ := json.Marshal(outputs) + t.logger.Infof("outputs_json: %s", string(outputsJSON)) + + return nil +} + +func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps int, durationS int, percentile float64, + testDeadline time.Time) (int, error) { + + // Prepare to collect transaction latencies + load := txloadtool.NewLoad(loadTarget, sendingTps, durationS, testDeadline, t.config.LogInterval) // Generate and sending transactions, waiting for their propagation execErr := load.Execute() @@ -129,7 +176,7 @@ func (t *Task) Execute(ctx context.Context) error { t.logger.Errorf("Error during transaction load execution: %v", execErr) t.ctx.SetResult(types.TaskResultFailure) - return execErr + return 0, execErr } // Collect the transactions and their latencies @@ -138,12 +185,12 @@ func (t *Task) Execute(ctx context.Context) error { t.logger.Errorf("Error measuring transaction propagation latencies: %v", measureErr) t.ctx.SetResult(types.TaskResultFailure) - return measureErr + return 0, measureErr } // Check if the context was cancelled or other errors occurred if result.Failed { - return fmt.Errorf("error measuring transaction propagation latencies: load failed") + return 0, fmt.Errorf("error measuring transaction propagation latencies: load failed") } // Send txes to other clients, for speeding up tx mining @@ -169,30 +216,12 @@ func (t *Task) Execute(ctx context.Context) error { // Calculate statistics t.logger.Infof("Last measure delay since start time: %s", result.LastMeasureDelay) - processedTxPerSecond := float64(result.TotalTxs) / result.LastMeasureDelay.Seconds() + processedTps_f := float64(result.TotalTxs) / result.LastMeasureDelay.Seconds() + processedTps := int(processedTps_f) // round t.logger.Infof("Processed %d transactions in %.2fs, mean throughput: %.2f tx/s", - result.TotalTxs, result.LastMeasureDelay.Seconds(), processedTxPerSecond) + result.TotalTxs, result.LastMeasureDelay.Seconds(), processedTps_f) t.logger.Infof("Sent %d transactions in %.2fs", result.TotalTxs, result.LastMeasureDelay.Seconds()) - t.ctx.Outputs.SetVar("mean_tps_throughput", processedTxPerSecond) - t.ctx.Outputs.SetVar("tx_count", result.TotalTxs) - t.ctx.Outputs.SetVar("duplicated_p2p_event_count", result.DuplicatedP2PEventCount) - t.ctx.Outputs.SetVar("missed_p2p_event_count", result.NotReceivedP2PEventCount) - t.ctx.Outputs.SetVar("coordinated_omission_event_count", result.CoordinatedOmissionEventCount) - - t.ctx.SetResult(types.TaskResultSuccess) - - outputs := map[string]interface{}{ - "tx_count": result.TotalTxs, - "mean_tps_throughput": processedTxPerSecond, - "duplicated_p2p_event_count": result.DuplicatedP2PEventCount, - "coordinated_omission_events_count": result.CoordinatedOmissionEventCount, - "missed_p2p_event_count": result.NotReceivedP2PEventCount, - } - - outputsJSON, 
_ := json.Marshal(outputs) - t.logger.Infof("outputs_json: %s", string(outputsJSON)) - - return nil + return processedTps, nil } From fc0e7c0b82c667363ec632df58a7276d8a6c8487 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Fri, 27 Jun 2025 15:37:26 +0200 Subject: [PATCH 02/10] add logs --- .../tasks/tx_pool_throughput_analysis/task.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go index 2b3551f4..99dee3db 100644 --- a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go +++ b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go @@ -132,6 +132,9 @@ func (t *Task) Execute(ctx context.Context) error { var throughoutMeasures []ThroughoutMeasure // Iterate over the TPS range and crate a plot processedTps vs sendingTps + t.logger.Infof("Iterating over the TPS range, starting TPS: %d, ending TPS: %d, increment TPS: %d", + t.config.StartingTPS, t.config.EndingTPS, t.config.IncrementTPS) + for sendingTps := t.config.StartingTPS; sendingTps <= t.config.EndingTPS; sendingTps += t.config.IncrementTPS { // measure the throughput with the current sendingTps @@ -149,10 +152,11 @@ func (t *Task) Execute(ctx context.Context) error { }) } - t.ctx.Outputs.SetVar("throughput_measures", throughoutMeasures) - // todo: log coordinated_omission_event_count and missed_p2p_event_count? + t.logger.Infof("Finished measuring throughput, collected %d measures", len(throughoutMeasures)) - t.ctx.SetResult(types.TaskResultSuccess) + // Set the throughput measures in the task context outputs + // from this plot we can compute the Maximum Sustainable Throughput or Capacity limit + t.ctx.Outputs.SetVar("throughput_measures", throughoutMeasures) // log coordinated_omission_event_count and missed_p2p_event_count? 
outputs := map[string]interface{}{ "throughput_measures": throughoutMeasures, @@ -161,12 +165,17 @@ func (t *Task) Execute(ctx context.Context) error { outputsJSON, _ := json.Marshal(outputs) t.logger.Infof("outputs_json: %s", string(outputsJSON)) + // Set the task result to success + t.ctx.SetResult(types.TaskResultSuccess) + return nil } func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps int, durationS int, percentile float64, testDeadline time.Time) (int, error) { + t.logger.Infof("Single measure of throughput, sending TPS: %d, duration: %d secs", sendingTps, durationS) + // Prepare to collect transaction latencies load := txloadtool.NewLoad(loadTarget, sendingTps, durationS, testDeadline, t.config.LogInterval) From dd0288838a3d005cc4057f7dca359bba9bb9e985 Mon Sep 17 00:00:00 2001 From: tosettil-polimi Date: Fri, 27 Jun 2025 16:07:31 +0200 Subject: [PATCH 03/10] refactor(tx_pool_throughput_analysis): improve variable naming for clarity in throughput calculations --- .../tasks/tx_pool_throughput_analysis/task.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go index 99dee3db..57740f62 100644 --- a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go +++ b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go @@ -136,12 +136,12 @@ func (t *Task) Execute(ctx context.Context) error { t.config.StartingTPS, t.config.EndingTPS, t.config.IncrementTPS) for sendingTps := t.config.StartingTPS; sendingTps <= t.config.EndingTPS; sendingTps += t.config.IncrementTPS { - // measure the throughput with the current sendingTps processedTps, err := t.measureTpsWithLoad(loadTarget, sendingTps, t.config.DurationS, percentile, singleMeasureDeadline) if err != nil { t.logger.Errorf("Error during throughput measurement with sendingTps=%d, duration=%d: %v", sendingTps, t.config.DurationS, err) t.ctx.SetResult(types.TaskResultFailure) + return err } @@ -173,7 +173,6 @@ func (t *Task) Execute(ctx context.Context) error { func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps int, durationS int, percentile float64, testDeadline time.Time) (int, error) { - t.logger.Infof("Single measure of throughput, sending TPS: %d, duration: %d secs", sendingTps, durationS) // Prepare to collect transaction latencies @@ -225,11 +224,11 @@ func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps // Calculate statistics t.logger.Infof("Last measure delay since start time: %s", result.LastMeasureDelay) - processedTps_f := float64(result.TotalTxs) / result.LastMeasureDelay.Seconds() - processedTps := int(processedTps_f) // round + processedTpsF := float64(result.TotalTxs) / result.LastMeasureDelay.Seconds() + processedTps := int(processedTpsF) // round t.logger.Infof("Processed %d transactions in %.2fs, mean throughput: %.2f tx/s", - result.TotalTxs, result.LastMeasureDelay.Seconds(), processedTps_f) + result.TotalTxs, result.LastMeasureDelay.Seconds(), processedTpsF) t.logger.Infof("Sent %d transactions in %.2fs", result.TotalTxs, result.LastMeasureDelay.Seconds()) return processedTps, nil From c29754da05e6775aa0b779ff6b934e48c963482b Mon Sep 17 00:00:00 2001 From: tosettil-polimi Date: Fri, 27 Jun 2025 16:15:57 +0200 Subject: [PATCH 04/10] refactor(tx_pool_throughput_analysis): update throughput test parameters to range from 500 to 2000 TPS with 250 TPS increments and reduced duration --- 
playbooks/dev/tx-pool-check-short.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/playbooks/dev/tx-pool-check-short.yaml b/playbooks/dev/tx-pool-check-short.yaml index 4dee7a50..6046dcfc 100644 --- a/playbooks/dev/tx-pool-check-short.yaml +++ b/playbooks/dev/tx-pool-check-short.yaml @@ -20,11 +20,13 @@ tasks: config: waitTime: 5 - name: tx_pool_throughput_analysis - title: "Check transaction pool throughput with 10.000 transactions" + title: "Check transaction pool throughput from 500 to 2000 TPS with 250 TPS increment, duration 2s per test" timeout: 30m config: - tps: 2000 - durationS: 5 + startingTps: 500 + endingTps: 2000 + incrementTps: 250 + durationS: 2 logInterval: 1000 configVars: privateKey: "walletPrivkey" \ No newline at end of file From ac94251f8f528127d02e9eeb772f3e62e864ce72 Mon Sep 17 00:00:00 2001 From: tosettil-polimi Date: Fri, 27 Jun 2025 16:16:01 +0200 Subject: [PATCH 05/10] refactor(tx_pool_throughput_analysis): expand throughput test parameters to cover a range from 100 to 1000 TPS and from 1000 to 5000 TPS with adjusted durations and increments --- playbooks/dev/tx-pool-check.yaml | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/playbooks/dev/tx-pool-check.yaml b/playbooks/dev/tx-pool-check.yaml index 18df1340..af35305b 100644 --- a/playbooks/dev/tx-pool-check.yaml +++ b/playbooks/dev/tx-pool-check.yaml @@ -21,16 +21,18 @@ tasks: waitTime: 5 - name: tx_pool_throughput_analysis timeout: 5m - title: "Check transaction pool throughput with 1.000 transactions in one second, duration 10s" + title: "Check transaction pool throughput from 100 to 1000 TPS with 100 TPS increment, duration 2s per test" config: - tps: 1000 - durationS: 10 + startingTps: 100 + endingTps: 1000 + incrementTps: 100 + durationS: 2 logInterval: 1000 configVars: privateKey: "walletPrivkey" - name: tx_pool_clean title: "Clean transaction pool" - timeout: 5m + timeout: 15m config: waitTime: 5 - name: tx_pool_latency_analysis @@ -49,10 +51,12 @@ tasks: waitTime: 5 - name: tx_pool_throughput_analysis timeout: 5m - title: "Check transaction pool throughput with 5.000 transactions in one second, duration 5s" + title: "Check transaction pool throughput from 1000 to 5000 TPS with 500 TPS increment, duration 2s per test" config: - tps: 5000 - durationS: 5 + startingTps: 1000 + endingTps: 5000 + incrementTps: 500 + durationS: 2 logInterval: 2500 configVars: privateKey: "walletPrivkey" From 5e220b428283d498469aa1d591a33f2d7ec619fd Mon Sep 17 00:00:00 2001 From: tosettil-polimi Date: Fri, 27 Jun 2025 16:18:26 +0200 Subject: [PATCH 06/10] refactor(tx_pool_throughput_analysis): remove unused percentile parameter from throughput measurement function --- pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go index 57740f62..d223703f 100644 --- a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go +++ b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go @@ -125,7 +125,6 @@ func (t *Task) Execute(ctx context.Context) error { // Create a new load target for the transaction propagation measurement loadTarget := txloadtool.NewLoadTarget(ctx, t.ctx, t.logger, t.wallet, client) - percentile := 0.95 singleMeasureDeadline := time.Now().Add(time.Duration(t.config.DurationS+60*30) * time.Second) // slice of pairs: sending tps, processed TPS values @@ -137,7 
+136,7 @@ func (t *Task) Execute(ctx context.Context) error { for sendingTps := t.config.StartingTPS; sendingTps <= t.config.EndingTPS; sendingTps += t.config.IncrementTPS { // measure the throughput with the current sendingTps - processedTps, err := t.measureTpsWithLoad(loadTarget, sendingTps, t.config.DurationS, percentile, singleMeasureDeadline) + processedTps, err := t.measureTpsWithLoad(loadTarget, sendingTps, t.config.DurationS, singleMeasureDeadline) if err != nil { t.logger.Errorf("Error during throughput measurement with sendingTps=%d, duration=%d: %v", sendingTps, t.config.DurationS, err) t.ctx.SetResult(types.TaskResultFailure) @@ -171,8 +170,7 @@ func (t *Task) Execute(ctx context.Context) error { return nil } -func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps int, durationS int, percentile float64, - testDeadline time.Time) (int, error) { +func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps, durationS int, testDeadline time.Time) (int, error) { t.logger.Infof("Single measure of throughput, sending TPS: %d, duration: %d secs", sendingTps, durationS) // Prepare to collect transaction latencies From 4633d889afad757ddc71529c483ac996588f5668 Mon Sep 17 00:00:00 2001 From: tosettil-polimi Date: Fri, 27 Jun 2025 16:29:46 +0200 Subject: [PATCH 07/10] fix(build-release.yml): update wiki links and release artifact URLs to reflect new repository ownership --- .github/workflows/build-release.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-release.yml b/.github/workflows/build-release.yml index ee81388e..ba2bdc9e 100644 --- a/.github/workflows/build-release.yml +++ b/.github/workflows/build-release.yml @@ -68,14 +68,14 @@ jobs: ${{ steps.changelog.outputs.changelog }} ### Release Artifacts - Please read through the [wiki](https://github.com/noku-team/assertoor/wiki) for setup & configuration instructions. + Please read through the [wiki](https://github.com/erigontech/assertoor/wiki) for setup & configuration instructions. 
| Release File | Description | | ------------- | ------------- | - | [assertoor_${{ inputs.version }}_windows_amd64.zip](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_windows_amd64.zip) | assertoor executables for windows/amd64 | - | [assertoor_${{ inputs.version }}_linux_amd64.tar.gz](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_linux_amd64.tar.gz) | assertoor executables for linux/amd64 | - | [assertoor_${{ inputs.version }}_linux_arm64.tar.gz](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_linux_arm64.tar.gz) | assertoor executables for linux/arm64 | - | [assertoor_${{ inputs.version }}_darwin_amd64.tar.gz](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_darwin_amd64.tar.gz) | assertoor executable for macos/amd64 | - | [assertoor_${{ inputs.version }}_darwin_arm64.tar.gz](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_darwin_arm64.tar.gz) | assertoor executable for macos/arm64 | + | [assertoor_${{ inputs.version }}_windows_amd64.zip](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_windows_amd64.zip) | assertoor executables for windows/amd64 | + | [assertoor_${{ inputs.version }}_linux_amd64.tar.gz](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_linux_amd64.tar.gz) | assertoor executables for linux/amd64 | + | [assertoor_${{ inputs.version }}_linux_arm64.tar.gz](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_linux_arm64.tar.gz) | assertoor executables for linux/arm64 | + | [assertoor_${{ inputs.version }}_darwin_amd64.tar.gz](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_darwin_amd64.tar.gz) | assertoor executable for macos/amd64 | + | [assertoor_${{ inputs.version }}_darwin_arm64.tar.gz](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_darwin_arm64.tar.gz) | assertoor executable for macos/arm64 | env: GITHUB_TOKEN: ${{ github.token }} From 5ea24341ab97c896bb8d8ce24b95a4f487117d91 Mon Sep 17 00:00:00 2001 From: tosettil-polimi Date: Fri, 27 Jun 2025 16:56:17 +0200 Subject: [PATCH 08/10] refactor(tx_pool_throughput_analysis): increase timeout for throughput analysis from 5m to 15m --- playbooks/dev/tx-pool-check.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/dev/tx-pool-check.yaml b/playbooks/dev/tx-pool-check.yaml index af35305b..0cf02494 100644 --- a/playbooks/dev/tx-pool-check.yaml +++ b/playbooks/dev/tx-pool-check.yaml @@ -50,7 +50,7 @@ tasks: config: waitTime: 5 - name: tx_pool_throughput_analysis - timeout: 5m + timeout: 15m title: "Check transaction pool throughput from 1000 to 5000 TPS with 500 TPS increment, duration 2s per test" config: startingTps: 1000 From 70b14d0181575ab59f2c552b70ceee9d09bb9419 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Tue, 8 Jul 2025 11:04:13 +0200 Subject: [PATCH 09/10] prepare for percentile usage --- .../tasks/tx_pool_throughput_analysis/task.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git 
a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go index d223703f..8732d8e4 100644 --- a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go +++ b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "encoding/json" + "errors" "fmt" "math/big" "time" @@ -125,6 +126,7 @@ func (t *Task) Execute(ctx context.Context) error { // Create a new load target for the transaction propagation measurement loadTarget := txloadtool.NewLoadTarget(ctx, t.ctx, t.logger, t.wallet, client) + percentile := 0.99 // 0.95 should be enough, change in the future if needed singleMeasureDeadline := time.Now().Add(time.Duration(t.config.DurationS+60*30) * time.Second) // slice of pairs: sending tps, processed TPS values @@ -136,7 +138,7 @@ func (t *Task) Execute(ctx context.Context) error { for sendingTps := t.config.StartingTPS; sendingTps <= t.config.EndingTPS; sendingTps += t.config.IncrementTPS { // measure the throughput with the current sendingTps - processedTps, err := t.measureTpsWithLoad(loadTarget, sendingTps, t.config.DurationS, singleMeasureDeadline) + processedTps, err := t.measureTpsWithLoad(loadTarget, sendingTps, t.config.DurationS, singleMeasureDeadline, percentile) if err != nil { t.logger.Errorf("Error during throughput measurement with sendingTps=%d, duration=%d: %v", sendingTps, t.config.DurationS, err) t.ctx.SetResult(types.TaskResultFailure) @@ -170,7 +172,8 @@ func (t *Task) Execute(ctx context.Context) error { return nil } -func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps, durationS int, testDeadline time.Time) (int, error) { +func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps, durationS int, + testDeadline time.Time, percentile float64) (int, error) { t.logger.Infof("Single measure of throughput, sending TPS: %d, duration: %d secs", sendingTps, durationS) // Prepare to collect transaction latencies @@ -219,7 +222,15 @@ func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps, t.logger.Infof("Total transactions sent: %d", result.TotalTxs) - // Calculate statistics + if percentile != 0.99 { + // Calculate the percentile of latencies using result.LatenciesMus + // Not implemented yet + notImpl := errors.New("percentile selection not implemented, use 0.99") + return 0, notImpl + } else { + t.logger.Infof("Using 0.99 percentile for latency calculation") + } + t.logger.Infof("Last measure delay since start time: %s", result.LastMeasureDelay) processedTpsF := float64(result.TotalTxs) / result.LastMeasureDelay.Seconds() From 8a349cdf74c84e28e71c67f3bf4ff0ab1d33356a Mon Sep 17 00:00:00 2001 From: tosettil-polimi Date: Tue, 8 Jul 2025 15:42:13 +0200 Subject: [PATCH 10/10] refactor(tx_pool_throughput_analysis): move logging of percentile usage to the correct position in the function --- pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go index 8732d8e4..450a2754 100644 --- a/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go +++ b/pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go @@ -227,10 +227,10 @@ func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps, // Not implemented yet notImpl := errors.New("percentile selection not implemented, use 0.99") return 0, notImpl 
- } else { - t.logger.Infof("Using 0.99 percentile for latency calculation") } + t.logger.Infof("Using 0.99 percentile for latency calculation") + t.logger.Infof("Last measure delay since start time: %s", result.LastMeasureDelay) processedTpsF := float64(result.TotalTxs) / result.LastMeasureDelay.Seconds()
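
The series above collects throughput_measures (offered load vs. processed TPS per iteration) and leaves percentile-based latency selection stubbed out ("percentile selection not implemented, use 0.99"). The following is a minimal, self-contained Go sketch, not part of the diffs above, of how those outputs could be post-processed. The names ThroughputMeasure, EstimateMaxSustainableTPS and LatencyPercentile, the 0.95 tolerance, and the sample data are assumptions made for illustration only.

package main

import (
	"fmt"
	"math"
	"sort"
)

// ThroughputMeasure mirrors the per-iteration pair stored by the task:
// the offered load and the throughput actually processed during that run.
type ThroughputMeasure struct {
	LoadTPS      int `json:"load_tps"`
	ProcessedTPS int `json:"processed_tps"`
}

// EstimateMaxSustainableTPS returns the highest offered load whose processed
// throughput stays within tolerance (e.g. 0.95) of the load; beyond that
// point the pool is treated as saturated.
func EstimateMaxSustainableTPS(measures []ThroughputMeasure, tolerance float64) int {
	best := 0
	for _, m := range measures {
		if float64(m.ProcessedTPS) >= tolerance*float64(m.LoadTPS) && m.LoadTPS > best {
			best = m.LoadTPS
		}
	}
	return best
}

// LatencyPercentile returns the p-th percentile (0 < p <= 1) of the given
// latencies in microseconds, using the nearest-rank method. A helper of this
// shape is one way the "not implemented yet" branch in measureTpsWithLoad
// could eventually compute percentiles other than 0.99.
func LatencyPercentile(latenciesMus []int64, p float64) int64 {
	if len(latenciesMus) == 0 {
		return 0
	}
	sorted := append([]int64(nil), latenciesMus...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	idx := int(math.Ceil(p*float64(len(sorted)))) - 1
	if idx < 0 {
		idx = 0
	}
	if idx >= len(sorted) {
		idx = len(sorted) - 1
	}
	return sorted[idx]
}

func main() {
	measures := []ThroughputMeasure{
		{LoadTPS: 500, ProcessedTPS: 498},
		{LoadTPS: 750, ProcessedTPS: 742},
		{LoadTPS: 1000, ProcessedTPS: 810},
	}
	fmt.Println("estimated MST:", EstimateMaxSustainableTPS(measures, 0.95))                    // 750
	fmt.Println("p99 latency (us):", LatencyPercentile([]int64{120, 300, 90, 4500, 210}, 0.99)) // 4500
}

The tolerance cutoff is only one heuristic for locating the saturation point; detecting the knee of the load-versus-processed curve, or the first run whose shortfall exceeds a configured margin, would serve the same purpose.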