Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions .github/workflows/build-release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -68,14 +68,14 @@ jobs:
${{ steps.changelog.outputs.changelog }}

### Release Artifacts
Please read through the [wiki](https://github.com/noku-team/assertoor/wiki) for setup & configuration instructions.
Please read through the [wiki](https://github.com/erigontech/assertoor/wiki) for setup & configuration instructions.
| Release File | Description |
| ------------- | ------------- |
| [assertoor_${{ inputs.version }}_windows_amd64.zip](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_windows_amd64.zip) | assertoor executables for windows/amd64 |
| [assertoor_${{ inputs.version }}_linux_amd64.tar.gz](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_linux_amd64.tar.gz) | assertoor executables for linux/amd64 |
| [assertoor_${{ inputs.version }}_linux_arm64.tar.gz](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_linux_arm64.tar.gz) | assertoor executables for linux/arm64 |
| [assertoor_${{ inputs.version }}_darwin_amd64.tar.gz](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_darwin_amd64.tar.gz) | assertoor executable for macos/amd64 |
| [assertoor_${{ inputs.version }}_darwin_arm64.tar.gz](https://github.com/noku-team/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_darwin_arm64.tar.gz) | assertoor executable for macos/arm64 |
| [assertoor_${{ inputs.version }}_windows_amd64.zip](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_windows_amd64.zip) | assertoor executables for windows/amd64 |
| [assertoor_${{ inputs.version }}_linux_amd64.tar.gz](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_linux_amd64.tar.gz) | assertoor executables for linux/amd64 |
| [assertoor_${{ inputs.version }}_linux_arm64.tar.gz](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_linux_arm64.tar.gz) | assertoor executables for linux/arm64 |
| [assertoor_${{ inputs.version }}_darwin_amd64.tar.gz](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_darwin_amd64.tar.gz) | assertoor executable for macos/amd64 |
| [assertoor_${{ inputs.version }}_darwin_arm64.tar.gz](https://github.com/erigontech/assertoor/releases/download/v${{ inputs.version }}/assertoor_${{ inputs.version }}_darwin_arm64.tar.gz) | assertoor executable for macos/arm64 |
env:
GITHUB_TOKEN: ${{ github.token }}

Expand Down
8 changes: 6 additions & 2 deletions pkg/coordinator/tasks/tx_pool_throughput_analysis/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,19 @@ package txpoolthroughputanalysis
// Config holds the parameters for the tx-pool throughput analysis task.
// The task sweeps the offered load from StartingTPS to EndingTPS in steps
// of IncrementTPS, running each step for DurationS seconds.
type Config struct {
	// PrivateKey is the hex-encoded private key of the funding wallet.
	PrivateKey string `yaml:"privateKey" json:"privateKey"`

	// StartingTPS is the first offered load of the sweep.
	// FIX: tags were `yaml:"tps" json:"tps"`, left over from the old single-TPS
	// field; the playbooks configure this as `startingTps`, so with the old tag
	// the value was never unmarshalled into this field.
	StartingTPS int `yaml:"startingTps" json:"startingTps"`
	// EndingTPS is the last (inclusive) offered load of the sweep.
	EndingTPS int `yaml:"endingTps" json:"endingTps"`
	// IncrementTPS is the step size between consecutive load levels.
	IncrementTPS int `yaml:"incrementTps" json:"incrementTps"`
	// DurationS is how long each load level runs, in seconds.
	DurationS int `yaml:"durationS" json:"durationS"`
	// LogInterval controls how often progress is logged (in transactions).
	LogInterval int `yaml:"logInterval" json:"logInterval"`
	// SecondsBeforeRunning delays the start of the measurement.
	SecondsBeforeRunning int `yaml:"secondsBeforeRunning" json:"secondsBeforeRunning"`
}

func DefaultConfig() Config {
return Config{
TPS: 100,
StartingTPS: 100,
EndingTPS: 1000,
IncrementTPS: 100,
DurationS: 60,
LogInterval: 100,
SecondsBeforeRunning: 0,
Expand Down
110 changes: 78 additions & 32 deletions pkg/coordinator/tasks/tx_pool_throughput_analysis/task.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"math/big"
"time"
Expand All @@ -25,6 +26,11 @@ var (
}
)

// ThroughoutMeasure pairs one offered load level with the throughput the
// pool actually achieved at that level: LoadTPS is the sending rate the
// load generator targeted, ProcessedTPS the measured processed rate.
// NOTE(review): the name looks like a typo for "ThroughputMeasure";
// renaming would touch every reference, so only flagging it here.
type ThroughoutMeasure struct {
LoadTPS int `json:"load_tps"`
ProcessedTPS int `json:"processed_tps"`
}

type Task struct {
ctx *types.TaskContext
options *types.TaskOptions
Expand Down Expand Up @@ -102,8 +108,8 @@ func (t *Task) Execute(ctx context.Context) error {
client := executionClients[n.Int64()]

t.logger.Infof("Measuring TxPool transaction propagation *throughput*")
t.logger.Infof("Targeting client: %s, TPS: %d, Duration: %d seconds",
client.GetName(), t.config.TPS, t.config.DurationS)
t.logger.Infof("Targeting client: %s, Starting TPS: %d, Ending TPS: %d, Increment TPS: %d, Duration: %d seconds",
client.GetName(), t.config.StartingTPS, t.config.EndingTPS, t.config.IncrementTPS, t.config.DurationS)

// Wait for the specified seconds before starting the task
if t.config.SecondsBeforeRunning > 0 {
Expand All @@ -117,19 +123,69 @@ func (t *Task) Execute(ctx context.Context) error {
}
}

// Prepare to collect transaction latencies
testDeadline := time.Now().Add(time.Duration(t.config.DurationS+60*30) * time.Second)

// Create a new load target for the transaction propagation measurement
loadTarget := txloadtool.NewLoadTarget(ctx, t.ctx, t.logger, t.wallet, client)
load := txloadtool.NewLoad(loadTarget, t.config.TPS, t.config.DurationS, testDeadline, t.config.LogInterval)

percentile := 0.99 // 0.95 should be enough, change in the future if needed
singleMeasureDeadline := time.Now().Add(time.Duration(t.config.DurationS+60*30) * time.Second)

// slice of pairs: sending tps, processed TPS values
var throughoutMeasures []ThroughoutMeasure

// Iterate over the TPS range and create a plot of processedTps vs sendingTps
t.logger.Infof("Iterating over the TPS range, starting TPS: %d, ending TPS: %d, increment TPS: %d",
t.config.StartingTPS, t.config.EndingTPS, t.config.IncrementTPS)

for sendingTps := t.config.StartingTPS; sendingTps <= t.config.EndingTPS; sendingTps += t.config.IncrementTPS {
// measure the throughput with the current sendingTps
processedTps, err := t.measureTpsWithLoad(loadTarget, sendingTps, t.config.DurationS, singleMeasureDeadline, percentile)
if err != nil {
t.logger.Errorf("Error during throughput measurement with sendingTps=%d, duration=%d: %v", sendingTps, t.config.DurationS, err)
t.ctx.SetResult(types.TaskResultFailure)

return err
}

// add to throughoutMeasures
throughoutMeasures = append(throughoutMeasures, ThroughoutMeasure{
LoadTPS: sendingTps,
ProcessedTPS: processedTps,
})
}

t.logger.Infof("Finished measuring throughput, collected %d measures", len(throughoutMeasures))

// Set the throughput measures in the task context outputs
// from this plot we can compute the Maximum Sustainable Throughput or Capacity limit
t.ctx.Outputs.SetVar("throughput_measures", throughoutMeasures) // log coordinated_omission_event_count and missed_p2p_event_count?

outputs := map[string]interface{}{
"throughput_measures": throughoutMeasures,
}

outputsJSON, _ := json.Marshal(outputs)
t.logger.Infof("outputs_json: %s", string(outputsJSON))

// Set the task result to success
t.ctx.SetResult(types.TaskResultSuccess)

return nil
}

func (t *Task) measureTpsWithLoad(loadTarget *txloadtool.LoadTarget, sendingTps, durationS int,
testDeadline time.Time, percentile float64) (int, error) {
t.logger.Infof("Single measure of throughput, sending TPS: %d, duration: %d secs", sendingTps, durationS)

// Prepare to collect transaction latencies
load := txloadtool.NewLoad(loadTarget, sendingTps, durationS, testDeadline, t.config.LogInterval)

// Generate and send transactions, waiting for their propagation
execErr := load.Execute()
if execErr != nil {
t.logger.Errorf("Error during transaction load execution: %v", execErr)
t.ctx.SetResult(types.TaskResultFailure)

return execErr
return 0, execErr
}

// Collect the transactions and their latencies
Expand All @@ -138,12 +194,12 @@ func (t *Task) Execute(ctx context.Context) error {
t.logger.Errorf("Error measuring transaction propagation latencies: %v", measureErr)
t.ctx.SetResult(types.TaskResultFailure)

return measureErr
return 0, measureErr
}

// Check if the context was cancelled or other errors occurred
if result.Failed {
return fmt.Errorf("error measuring transaction propagation latencies: load failed")
return 0, fmt.Errorf("error measuring transaction propagation latencies: load failed")
}

// Send txes to other clients, for speeding up tx mining
Expand All @@ -166,33 +222,23 @@ func (t *Task) Execute(ctx context.Context) error {

t.logger.Infof("Total transactions sent: %d", result.TotalTxs)

// Calculate statistics
if percentile != 0.99 {
// Calculate the percentile of latencies using result.LatenciesMus
// Not implemented yet
notImpl := errors.New("percentile selection not implemented, use 0.99")
return 0, notImpl
}

t.logger.Infof("Using 0.99 percentile for latency calculation")

t.logger.Infof("Last measure delay since start time: %s", result.LastMeasureDelay)

processedTxPerSecond := float64(result.TotalTxs) / result.LastMeasureDelay.Seconds()
processedTpsF := float64(result.TotalTxs) / result.LastMeasureDelay.Seconds()
processedTps := int(processedTpsF) // truncates toward zero, not rounds

t.logger.Infof("Processed %d transactions in %.2fs, mean throughput: %.2f tx/s",
result.TotalTxs, result.LastMeasureDelay.Seconds(), processedTxPerSecond)
result.TotalTxs, result.LastMeasureDelay.Seconds(), processedTpsF)
t.logger.Infof("Sent %d transactions in %.2fs", result.TotalTxs, result.LastMeasureDelay.Seconds())

t.ctx.Outputs.SetVar("mean_tps_throughput", processedTxPerSecond)
t.ctx.Outputs.SetVar("tx_count", result.TotalTxs)
t.ctx.Outputs.SetVar("duplicated_p2p_event_count", result.DuplicatedP2PEventCount)
t.ctx.Outputs.SetVar("missed_p2p_event_count", result.NotReceivedP2PEventCount)
t.ctx.Outputs.SetVar("coordinated_omission_event_count", result.CoordinatedOmissionEventCount)

t.ctx.SetResult(types.TaskResultSuccess)

outputs := map[string]interface{}{
"tx_count": result.TotalTxs,
"mean_tps_throughput": processedTxPerSecond,
"duplicated_p2p_event_count": result.DuplicatedP2PEventCount,
"coordinated_omission_events_count": result.CoordinatedOmissionEventCount,
"missed_p2p_event_count": result.NotReceivedP2PEventCount,
}

outputsJSON, _ := json.Marshal(outputs)
t.logger.Infof("outputs_json: %s", string(outputsJSON))

return nil
return processedTps, nil
}
8 changes: 5 additions & 3 deletions playbooks/dev/tx-pool-check-short.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,13 @@ tasks:
config:
waitTime: 5
- name: tx_pool_throughput_analysis
title: "Check transaction pool throughput with 10.000 transactions"
title: "Check transaction pool throughput from 500 to 2000 TPS with 250 TPS increment, duration 2s per test"
timeout: 30m
config:
tps: 2000
durationS: 5
startingTps: 500
endingTps: 2000
incrementTps: 250
durationS: 2
logInterval: 1000
configVars:
privateKey: "walletPrivkey"
20 changes: 12 additions & 8 deletions playbooks/dev/tx-pool-check.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,16 +21,18 @@ tasks:
waitTime: 5
- name: tx_pool_throughput_analysis
timeout: 5m
title: "Check transaction pool throughput with 1.000 transactions in one second, duration 10s"
title: "Check transaction pool throughput from 100 to 1000 TPS with 100 TPS increment, duration 2s per test"
config:
tps: 1000
durationS: 10
startingTps: 100
endingTps: 1000
incrementTps: 100
durationS: 2
logInterval: 1000
configVars:
privateKey: "walletPrivkey"
- name: tx_pool_clean
title: "Clean transaction pool"
timeout: 5m
timeout: 15m
config:
waitTime: 5
- name: tx_pool_latency_analysis
Expand All @@ -48,11 +50,13 @@ tasks:
config:
waitTime: 5
- name: tx_pool_throughput_analysis
timeout: 5m
title: "Check transaction pool throughput with 5.000 transactions in one second, duration 5s"
timeout: 15m
title: "Check transaction pool throughput from 1000 to 5000 TPS with 500 TPS increment, duration 2s per test"
config:
tps: 5000
durationS: 5
startingTps: 1000
endingTps: 5000
incrementTps: 500
durationS: 2
logInterval: 2500
configVars:
privateKey: "walletPrivkey"
Loading