diff --git a/nix/workbench/analyse/chain-filters/epoch4+.json b/nix/workbench/analyse/chain-filters/epoch4+.json
new file mode 100644
index 00000000000..ad91a4e69e7
--- /dev/null
+++ b/nix/workbench/analyse/chain-filters/epoch4+.json
@@ -0,0 +1,7 @@
+[ { "tag": "CSlot"
+  , "contents":
+    { "tag": "EpochGEq"
+    , "contents": 3
+    }
+  }
+]
diff --git a/nix/workbench/analyse/chain-filters/epoch5+.json b/nix/workbench/analyse/chain-filters/epoch5+.json
new file mode 100644
index 00000000000..81619101a33
--- /dev/null
+++ b/nix/workbench/analyse/chain-filters/epoch5+.json
@@ -0,0 +1,7 @@
+[ { "tag": "CSlot"
+  , "contents":
+    { "tag": "EpochGEq"
+    , "contents": 4
+    }
+  }
+]
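Context for these two new filters (an editorial aside, not part of the patch): the analysis chain filters compare against zero-based epoch numbers, so `EpochGEq 4` in `epoch5+.json` keeps slots from the fifth epoch onward. A minimal sketch of probing a filter with jq, assuming only the JSON shape above:

    # Which of epochs 3, 4, 5 would epoch5+.json keep?
    for epoch in 3 4 5
    do jq --argjson e "$epoch" \
          'map(select(.tag == "CSlot" and .contents.tag == "EpochGEq"))
           | all(.contents.contents <= $e)' \
          nix/workbench/analyse/chain-filters/epoch5+.json
    done
    # prints: false true true -- epochs 0..3 are dropped, epoch 4 onward is kept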
diff --git a/nix/workbench/backend/backend.sh b/nix/workbench/backend/backend.sh
index 2e75ac1e03f..d0ef16ea64c 100644
--- a/nix/workbench/backend/backend.sh
+++ b/nix/workbench/backend/backend.sh
@@ -24,6 +24,8 @@ usage_backend() {
     wait-pools-stopped RUNDIR
                      Wait until all pools are stopped
+    wait-workloads-stopped RUNDIR
+                     Wait until all workloads are stopped
     stop-cluster RUNDIR
     cleanup-cluster RUNDIR
                      Wipe cluster state to pristine
@@ -50,8 +52,8 @@ case "${op}" in
     start-tracers ) backend_$WB_BACKEND "$@";;
     start-nodes ) backend_$WB_BACKEND "$@";;
     start-generator ) backend_$WB_BACKEND "$@";;
+    start-workloads ) backend_$WB_BACKEND "$@";;
     start-healthchecks ) backend_$WB_BACKEND "$@";;
-    start-latencies ) backend_$WB_BACKEND "$@";;
     # Fine grained
     start-node ) backend_$WB_BACKEND "$@";;
     stop-node ) backend_$WB_BACKEND "$@";;
@@ -59,7 +61,7 @@ case "${op}" in
     wait-node-stopped ) backend_$WB_BACKEND "$@";;
     get-node-socket-path ) backend_$WB_BACKEND "$@";;
     wait-pools-stopped ) backend_$WB_BACKEND "$@";;
-    wait-latencies-stopped ) backend_$WB_BACKEND "$@";;
+    wait-workloads-stopped ) backend_$WB_BACKEND "$@";;
     # Stop functions
     stop-all ) backend_$WB_BACKEND "$@";;
     fetch-logs ) backend_$WB_BACKEND "$@";;
diff --git a/nix/workbench/backend/nomad-job.nix b/nix/workbench/backend/nomad-job.nix
index 57bcdaed097..a7bbe91d710 100644
--- a/nix/workbench/backend/nomad-job.nix
+++ b/nix/workbench/backend/nomad-job.nix
@@ -902,28 +902,27 @@ let
           }
         ])
         ++
-        # healthcheck
-        [
-          ## healthcheck start.sh script.
+        # workloads
+        (builtins.map (workload:
+          ## workload start.sh script.
           {
             env = false;
-            destination = "local/${stateDir}/healthcheck/start.sh";
-            data = escapeTemplate
-              profileData.healthcheck-service.start.value;
+            destination = "local/${stateDir}/workloads/${workload.name}/start.sh";
+            data = escapeTemplate workload.start.value;
             change_mode = "noop";
             error_on_missing_key = true;
             perms = "744"; # Only for every "start.sh" script. Default: "644"
           }
-        ]
+        ) profileData.workloads-service)
         ++
-        # latency
+        # healthcheck
         [
-          ## Latency start.sh script.
+          ## healthcheck start.sh script.
           {
             env = false;
-            destination = "local/${stateDir}/latency/start.sh";
+            destination = "local/${stateDir}/healthcheck/start.sh";
             data = escapeTemplate
-              profileData.latency-service.start.value;
+              profileData.healthcheck-service.start.value;
             change_mode = "noop";
             error_on_missing_key = true;
             perms = "744"; # Only for every "start.sh" script. Default: "644"
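The map above replaces the single hard-coded healthcheck/latency stanzas with one `template` stanza per workload, so each Task materializes `local/run/current/workloads/<name>/start.sh`. A sketch of enumerating those paths the same way the backend scripts below do (it assumes only that `profile.json` carries the `workloads` list this patch introduces):

    for workload in $(jq -r '.workloads[].name' "$dir"/profile.json)
    do echo "local/run/current/workloads/${workload}/start.sh"
    done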
diff --git a/nix/workbench/backend/nomad.sh b/nix/workbench/backend/nomad.sh
index 1d9554ee620..75e9c5d0bae 100644
--- a/nix/workbench/backend/nomad.sh
+++ b/nix/workbench/backend/nomad.sh
@@ -34,8 +34,8 @@ backend_nomad() {
   # - allocate-run-directory-tracers RUN-DIR (Nomad only)
   # - allocate-run-directory-nodes RUN-DIR (Nomad only)
   # - allocate-run-directory-generator RUN-DIR (Nomad only)
+  # - allocate-run-directory-workloads RUN-DIR (Nomad only)
   # - allocate-run-directory-healthchecks RUN-DIR (Nomad only)
-  # - allocate-run-directory-latencies RUN-DIR (Nomad only)
   # - allocate-run-nomad-job-patch-name RUN-DIR NAME (Nomad only)
   # - allocate-run-nomad-job-patch-namespace RUN-DIR NAME (Nomad only)
   # - allocate-run-nomad-job-patch-nix RUN-DIR (Nomad only)
@@ -52,10 +52,10 @@ backend_nomad() {
   # After `allocate-run` the Nomad job is running (supervisord) waiting for
   # genesis to be deployed and tracer/cardano-nodes/generator to be started.
   #
-  # "generator", "tracer", "node", "healthcheck" and "latency" folder contents
-  # (start.sh, config files, etc) are included in the Nomad Job spec file as
-  # "template" stanzas and are materialized inside the container when the job
-  # is started. This is how it works for every environment combination
+  # "generator", "tracer", "node", "workloads" and "healthcheck" folder
+  # contents (start.sh, config files, etc) are included in the Nomad Job spec
+  # file as "template" stanzas and are materialized inside the container when
+  # the job is started. This is how it works for every environment combination
   # (exec-(local/cloud)).
   #
   # But "genesis" and "CARDANO_MAINNET_MIRROR" are the deployment exceptions:
@@ -83,8 +83,8 @@ backend_nomad() {
       backend_nomad allocate-run-directory-tracers "${dir}"
       backend_nomad allocate-run-directory-nodes "${dir}"
      backend_nomad allocate-run-directory-generator "${dir}"
+      backend_nomad allocate-run-directory-workloads "${dir}"
       backend_nomad allocate-run-directory-healthchecks "${dir}"
-      backend_nomad allocate-run-directory-latencies "${dir}"
       # This one is decided at "setenv-defaults" of each sub-backend.
       local nomad_environment=$(envjqr 'nomad_environment')
@@ -188,26 +188,32 @@ backend_nomad() {
       mkdir -p "${dir}"/generator
       ;;

-    allocate-run-directory-healthchecks )
+    allocate-run-directory-workloads )
       local usage="USAGE: wb backend $op RUN-DIR"
       local dir=${1:?$usage}; shift
-      mkdir "${dir}"/healthcheck
-      # For every node ...
-      local nodes=($(jq_tolist keys "${dir}"/node-specs.json))
-      for node in ${nodes[*]}
+      mkdir "${dir}"/workloads
+
+      # For every workload ...
+      for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
       do
-        # File "start.sh" that usually goes in here is copied from the
-        # Task/container once it's started because the contents are created or
-        # patched using Nomad's "template" stanza in the job spec and we want to
-        # hold a copy of what was actually run.
-        mkdir "${dir}"/healthcheck/"${node}"
+        mkdir "${dir}"/workloads/"${workload}"
+        # Workload may or may not run something in all producers.
+        local nodes=($(jq_tolist 'map(select(.isProducer) | .name)' "$dir"/node-specs.json))
+        for node in ${nodes[*]}
+        do
+          # File "start.sh" that usually goes in here is copied from the
+          # Task/container once it's started because the contents are created or
+          # patched using Nomad's "template" stanza in the job spec and we want
+          # to hold a copy of what was actually run.
+          mkdir "${dir}"/workloads/"${workload}"/"${node}"
+        done
       done
       ;;

-    allocate-run-directory-latencies )
+    allocate-run-directory-healthchecks )
       local usage="USAGE: wb backend $op RUN-DIR"
       local dir=${1:?$usage}; shift
-      mkdir "${dir}"/latency
+      mkdir "${dir}"/healthcheck
       # For every node ...
       local nodes=($(jq_tolist keys "${dir}"/node-specs.json))
       for node in ${nodes[*]}
       do
         # File "start.sh" that usually goes in here is copied from the
@@ -216,7 +222,7 @@ backend_nomad() {
         # Task/container once it's started because the contents are created or
         # patched using Nomad's "template" stanza in the job spec and we want to
         # hold a copy of what was actually run.
-        mkdir "${dir}"/latency/"${node}"
+        mkdir "${dir}"/healthcheck/"${node}"
       done
       ;;
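Both allocators lean on the `jq_tolist` helper, whose definition is not part of this diff. Judging from its call sites, it evaluates a jq expression that yields an array and prints the elements space-separated, roughly (a sketch inferred from usage, not the actual definition):

    jq_tolist () {
      local exp=$1 file=$2
      jq --raw-output "${exp} | join(\" \")" "${file}"
    }
    # jq_tolist keys node-specs.json                          -> all node names
    # jq_tolist 'map(select(.isProducer) | .name)' ...        -> producer names only
    # jq_tolist '.workloads | map(.name)' profile.json        -> workload names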
@@ -391,18 +397,7 @@ backend_nomad() {
       # Last the Tasks' template stanzas.
       msg "Fetch Nomad generated files ..."
       local jobs_tasks=()
-      # The `tx-generator` config files, running in one of the Tasks were
-      # `cardano-node` is deployed.
-      backend_nomad download-config-generator "${dir}" &
-      jobs_tasks+=("$!")
-      # For every node (not including a possible tracer Task) ...
-      local nodes=($(jq_tolist keys "$dir"/node-specs.json))
-      for node in ${nodes[*]}
-      do
-        # `cardano-node` config files.
-        backend_nomad download-config-node "${dir}" "${node}" &
-        jobs_tasks+=("$!")
-      done
+      # Tracers.
       if test "${one_tracer_per_node}" = "true"
       then
         local nodes=($(jq_tolist keys "$dir"/node-specs.json))
@@ -422,22 +417,35 @@ backend_nomad() {
       local nodes=($(jq_tolist keys "$dir"/node-specs.json))
       for node in ${nodes[*]}
       do
-        # Only used for debugging!
-        backend_nomad download-config-healthcheck "${dir}" "${node}" &
+        # `cardano-node` config files.
+        backend_nomad download-config-node "${dir}" "${node}" &
         jobs_tasks+=("$!")
       done
-      # DO NOT DOWNLOAD THE latency SCRIPTS/CONFIG EVERY TIME
-      if echo "${WB_SHELL_PROFILE}" | grep --quiet "latency"
-      then
+      # The `tx-generator` config files, running in one of the Tasks where
+      # `cardano-node` is deployed.
+      backend_nomad download-config-generator "${dir}" &
+      jobs_tasks+=("$!")
+      # For every workload
+      for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
+      do
         # For every node (not including a possible tracer Task) ...
-        local nodes=($(jq_tolist keys "$dir"/node-specs.json))
+        # Workload may or may not run something in all producers.
+        local nodes=($(jq_tolist 'map(select(.isProducer) | .name)' "$dir"/node-specs.json))
         for node in ${nodes[*]}
         do
           # Only used for debugging!
-          backend_nomad download-config-latency "${dir}" "${node}" &
+          backend_nomad download-config-workload "${dir}" "${workload}" "${node}" &
           jobs_tasks+=("$!")
         done
-      fi
+      done
+      # For every node (not including a possible tracer Task) ...
+      local nodes=($(jq_tolist keys "$dir"/node-specs.json))
+      for node in ${nodes[*]}
+      do
+        # Only used for debugging!
+        backend_nomad download-config-healthcheck "${dir}" "${node}" &
+        jobs_tasks+=("$!")
+      done
       # Wait and check!
       if test -n "${jobs_tasks}"
       then
@@ -754,7 +762,7 @@ backend_nomad() {
   # Functions to stop the cluster:
   # - stop-all RUN-DIR
   # - stop-all-healthchecks RUN-DIR (Nomad only)
-  # - stop-all-latencies RUN-DIR (Nomad only)
+  # - stop-all-workloads RUN-DIR (Nomad only)
   # - stop-all-generator RUN-DIR (Nomad only)
   # - stop-all-nodes RUN-DIR (Nomad only)
   # - stop-all-tracers RUN-DIR (Nomad only)
@@ -770,20 +778,8 @@ backend_nomad() {
       local dir=${1:?$usage}; shift
       local generator_task=$(envjqr 'generator_task_name')
-      # Stop latency(s).
-      ##################
-      local jobs_latencies_array=()
-      for node in $(jq_tolist 'keys' "${dir}"/node-specs.json)
-      do
-        backend_nomad stop-all-latencies "${dir}" "${node}" &
-        jobs_latencies_array+=("$!")
-      done
-      if ! wait_all "${jobs_latencies_array[@]}"
-      then
-        msg "$(red "Failed to stop latency(ies)")"
-      fi
       # Stop healthcheck(s).
-      #####################
+      ######################
       local jobs_healthchecks_array=()
       for node in $(jq_tolist 'keys' "${dir}"/node-specs.json)
       do
@@ -794,6 +790,23 @@ backend_nomad() {
       then
         msg "$(red "Failed to stop healthcheck(s)")"
       fi
+      # Stop workload(s).
+      ###################
+      local jobs_workloads_array=()
+      # For every workload
+      for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
+      do
+        # Workload may or may not run something in all producers.
+        for node in $(jq_tolist 'map(select(.isProducer) | .name)' "$dir"/node-specs.json)
+        do
+          backend_nomad stop-all-workloads "${dir}" "${workload}" "${node}" &
+          jobs_workloads_array+=("$!")
+        done
+      done
+      if ! wait_all "${jobs_workloads_array[@]}"
+      then
+        msg "$(red "Failed to stop workload(s)")"
+      fi
       # Stop generator.
       #################
       backend_nomad stop-all-generator "${dir}" "${generator_task}"
@@ -833,25 +846,25 @@ backend_nomad() {
       fi
       ;;

-    stop-all-latencies )
+    stop-all-healthchecks )
       local usage="USAGE: wb backend $op RUN-DIR"
       local dir=${1:?$usage}; shift
       local task=${1:?$usage}; shift
-      local task_dir="${dir}"/latency/"${task}"
+      local task_dir="${dir}"/healthcheck/"${task}"
       if test -f "${task_dir}"/started && !(test -f "${task_dir}"/stopped || test -f "${task_dir}"/quit)
       then
-        if backend_nomad is-task-program-running "${dir}" "${task}" latency
+        if backend_nomad is-task-program-running "${dir}" "${task}" healthcheck
         then
-          if ! backend_nomad task-program-stop "${dir}" "${task}" latency
+          if ! backend_nomad task-program-stop "${dir}" "${task}" healthcheck
           then
-            msg "$(yellow "WARNING: Program \"latency\" inside Task \"${task}\" failed to stop")"
+            msg "$(yellow "WARNING: Program \"healthcheck\" inside Task \"${task}\" failed to stop")"
           else
            touch "${task_dir}"/stopped
-            msg "$(green "supervisord program \"latency\" inside Nomad Task \"${task}\" down!")"
+            msg "$(green "supervisord program \"healthcheck\" inside Nomad Task \"${task}\" down!")"
           fi
         else
           touch "${task_dir}"/quit
-          if backend_nomad is-task-program-failed "${dir}" "${task}" latency
+          if backend_nomad is-task-program-failed "${dir}" "${task}" healthcheck
           then
             local generator_task=$(envjqr 'generator_task_name')
             # If the node quits (due to `--shutdown_on_slot_synced X` or
             # `--shutdown_on_block_synced X`) the generator quits with an error.
             if test "${generator_task}" != "${task}" || test "${generator_can_fail}" = "false" || backend_nomad is-task-program-running "${dir}" "${task}" "${task}"
             then
               # Do not fail here, because nobody will be able to stop the cluster!
-              msg "$(red "FATAL: \"latency\" inside Task \"${task}\" quit unexpectedly")"
+              msg "$(red "FATAL: \"healthcheck\" inside Task \"${task}\" quit unexpectedly")"
             else
-              msg "$(yellow "INFO: Program \"latency\" inside Task \"${task}\" failed, but expected when \"${task}\" automatically exits first and makes \"generator\" fail")"
+              msg "$(yellow "INFO: Program \"healthcheck\" inside Task \"${task}\" failed, but expected when \"${task}\" automatically exits first and makes \"generator\" fail")"
             fi
           else
-            msg "$(yellow "WARNING: Program \"latency\" inside Task \"${task}\" was not running, should it?")"
+            msg "$(yellow "WARNING: Program \"healthcheck\" inside Task \"${task}\" was not running, should it?")"
           fi
         fi
       fi
       ;;

-    stop-all-healthchecks )
+    stop-all-workloads )
       local usage="USAGE: wb backend $op RUN-DIR"
       local dir=${1:?$usage}; shift
+      local workload=${1:?$usage}; shift
       local task=${1:?$usage}; shift
-      local task_dir="${dir}"/healthcheck/"${task}"
+      local task_dir="${dir}"/workloads/"${workload}"/"${task}"
       if test -f "${task_dir}"/started && !(test -f "${task_dir}"/stopped || test -f "${task_dir}"/quit)
       then
-        if backend_nomad is-task-program-running "${dir}" "${task}" healthcheck
+        if backend_nomad is-task-program-running "${dir}" "${task}" "${workload}"
         then
-          if ! backend_nomad task-program-stop "${dir}" "${task}" healthcheck
+          if ! backend_nomad task-program-stop "${dir}" "${task}" "${workload}"
          then
-            msg "$(yellow "WARNING: Program \"healthcheck\" inside Task \"${task}\" failed to stop")"
+            msg "$(yellow "WARNING: Program \"${workload}\" inside Task \"${task}\" failed to stop")"
           else
             touch "${task_dir}"/stopped
-            msg "$(green "supervisord program \"healthcheck\" inside Nomad Task \"${task}\" down!")"
+            msg "$(green "supervisord program \"${workload}\" inside Nomad Task \"${task}\" down!")"
           fi
         else
           touch "${task_dir}"/quit
-          if backend_nomad is-task-program-failed "${dir}" "${task}" healthcheck
+          if backend_nomad is-task-program-failed "${dir}" "${task}" "${workload}"
           then
             local generator_task=$(envjqr 'generator_task_name')
             # If the node quits (due to `--shutdown_on_slot_synced X` or
             # `--shutdown_on_block_synced X`) the generator quits with an error.
@@ -898,12 +912,12 @@ backend_nomad() {
             if test "${generator_task}" != "${task}" || test "${generator_can_fail}" = "false" || backend_nomad is-task-program-running "${dir}" "${task}" "${task}"
             then
               # Do not fail here, because nobody will be able to stop the cluster!
-              msg "$(red "FATAL: \"healthcheck\" inside Task \"${task}\" quit unexpectedly")"
+              msg "$(red "FATAL: \"${workload}\" inside Task \"${task}\" quit unexpectedly")"
             else
-              msg "$(yellow "INFO: Program \"healthcheck\" inside Task \"${task}\" failed, but expected when \"${task}\" automatically exits first and makes \"generator\" fail")"
+              msg "$(yellow "INFO: Program \"${workload}\" inside Task \"${task}\" failed, but expected when \"${task}\" automatically exits first and makes \"generator\" fail")"
             fi
           else
-            msg "$(yellow "WARNING: Program \"healthcheck\" inside Task \"${task}\" was not running, should it?")"
+            msg "$(yellow "WARNING: Program \"${workload}\" inside Task \"${task}\" was not running, should it?")"
           fi
         fi
       fi
       ;;
@@ -1070,35 +1084,6 @@ backend_nomad() {
       msg "Fetch logs ..."

-      # Download latency(ies) logs. ############################################
-      ##########################################################################
-      # Download retry "infinite" loop.
-      local latencies_array
-      # Fetch the nodes that don't have all the log files in its directory
-      latencies_array="$(jq_tolist 'keys' "$dir"/node-specs.json)"
-      while test -n "${latencies_array:-}"
-      do
-        local latencies_jobs_array=()
-        for node in ${latencies_array[*]}
-        do
-          backend_nomad download-logs-latency "${dir}" "${node}" &
-          latencies_jobs_array+=("$!")
-        done
-        if test -n "${latencies_jobs_array:-}" # If = () "unbound variable" error
-        then
-          # Wait until all jobs finish, don't use `wait_kill_em_all` that kills
-          # Returns the exit code of the last failed job, we ignore it!
-          wait_all "${latencies_jobs_array[@]}" || true
-        fi
-        # Fetch the nodes that don't have all the log files in its directory
-        latencies_array="$(backend_nomad fetch-logs-latencies "${dir}")"
-        if test -n "${latencies_array:-}"
-        then
-          msg "Retrying latency(ies) [${latencies_array[@]}] logs download"
-          read -p "Hit enter to continue ..."
-        fi
-      done
-      msg "$(green "Finished downloading latency(ies) logs")"
       # Download healthcheck(s) logs. ##########################################
       ##########################################################################
       # Download retry "infinite" loop.
@@ -1128,6 +1113,40 @@ backend_nomad() {
         fi
       done
       msg "$(green "Finished downloading Healthcheck(s) logs")"
+      # Download workload(s) logs. #############################################
+      ##########################################################################
+      # For every workload
+      for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
+      do
+        # Download retry "infinite" loop.
+        local workloads_array
+        # Fetch the nodes that don't have all the log files in its directory
+        # Workload may or may not run something in all producers.
+        workloads_array=($(jq_tolist 'map(select(.isProducer) | .name)' "$dir"/node-specs.json))
+        while test -n "${workloads_array:-}"
+        do
+          local workloads_jobs_array=()
+          for node in ${workloads_array[*]}
+          do
+            backend_nomad download-logs-workload "${dir}" "${workload}" "${node}" &
+            workloads_jobs_array+=("$!")
+          done
+          if test -n "${workloads_jobs_array:-}" # If = () "unbound variable" error
+          then
+            # Wait until all jobs finish, don't use `wait_kill_em_all` that kills
+            # Returns the exit code of the last failed job, we ignore it!
+            wait_all "${workloads_jobs_array[@]}" || true
+          fi
+          # Fetch the nodes that don't have all the log files in its directory
+          workloads_array="$(backend_nomad fetch-logs-workloads "${dir}" "${workload}")"
+          if test -n "${workloads_array:-}"
+          then
+            msg "Retrying workload \"${workload}\" [${workloads_array[@]}] logs download"
+            read -p "Hit enter to continue ..."
+          fi
+        done
+      done
+      msg "$(green "Finished downloading workload(s) logs")"
       # Download generator logs. ###############################################
       ##########################################################################
       # Download retry "infinite" loop.
@@ -1256,12 +1275,12 @@ backend_nomad() {
       msg "$(green "Finished downloading entrypoint(s) logs")"

       # TODO: Check downloads
-      # ls run/current/nomad/{node-{0..51},explorer}/{stdout,stderr} || msg ""
-      # ls run/current/tracer/{node-{0..51},explorer}/{exit_code,stdout,stderr} || msg ""
-      # ls run/current/{node-{0..51},explorer}/{exit_code,stdout,stderr} || msg ""
-      # ls run/current/generator/{exit_code,stdout,stderr} || msg ""
-      # ls run/current/healthcheck/{node-{0..51},explorer}/{exit_code,stdout,stderr} || msg ""
-      # ls run/current/latency/{node-{0..51},explorer}/{exit_code,stdout,stderr} || msg ""
+      # ls run/current/nomad/{node-{0..51},explorer}/{stdout,stderr} || msg ""
+      # ls run/current/tracer/{node-{0..51},explorer}/{exit_code,stdout,stderr} || msg ""
+      # ls run/current/{node-{0..51},explorer}/{exit_code,stdout,stderr} || msg ""
+      # ls run/current/generator/{exit_code,stdout,stderr} || msg ""
+      # ls run/current/workloads/WORKLOAD-NAME/{node-{0..51}}/{exit_code,stdout,stderr} || msg ""
+      # ls run/current/healthcheck/{node-{0..51},explorer}/{exit_code,stdout,stderr} || msg ""

       msg "$(green "Finished fetching logs")"
       ;;
@@ -1396,47 +1415,49 @@ backend_nomad() {
       ;;

     # Array of nodes that don't have all the required log files in its directory
-    fetch-logs-latencies )
-      local usage="USAGE: wb backend $op RUN-DIR"
+    fetch-logs-workloads )
+      local usage="USAGE: wb backend $op RUN-DIR WORKLOAD-NAME"
       local dir=${1:?$usage}; shift
-      local latencies_array=()
-      for node in $(jq_tolist 'keys' "${dir}"/node-specs.json)
+      local workload=${1:?$usage}; shift
+      local workloads_array=()
+      # Workload may or may not run something in all producers.
+      for node in $(jq_tolist 'map(select(.isProducer) | .name)' "$dir"/node-specs.json)
      do
-        # Only if the latency was started.
-        if test -f "${dir}"/latency/"${node}"/started
+        # Only if the workload was started.
+        if test -f "${dir}"/workloads/"${workload}"/"${node}"/started
        then
-          local latency_ok="true"
+          local workload_ok="true"
           # Check the existance of all the wanted files:
-          if ! test -f "${dir}"/latency/"${node}"/exit_code
+          if ! test -f "${dir}"/workloads/"${workload}"/"${node}"/exit_code
           then
-            latency_ok="false"
+            workload_ok="false"
           fi
-          if ! test -f "${dir}"/latency/"${node}"/stdout
+          if ! test -f "${dir}"/workloads/"${workload}"/"${node}"/stdout
           then
-            latency_ok="false"
+            workload_ok="false"
           fi
-          if ! test -f "${dir}"/latency/"${node}"/stderr
+          if ! test -f "${dir}"/workloads/"${workload}"/"${node}"/stderr
           then
-            latency_ok="false"
+            workload_ok="false"
           fi
           # Below like errors can end in truncated files, a proper flag is used!
          # failed to exec into task: read tcp 10.0.0.115:33840->3.72.231.105:443: read: connection reset by peer
           # tar: Unexpected EOF in archive
           # tar: Unexpected EOF in archive
           # tar: Error is not recoverable: exiting now
-          if test -f "${dir}"/latency/"${node}"/download_failed
+          if test -f "${dir}"/workloads/"${workload}"/"${node}"/download_failed
           then
-            latency_ok="false"
+            workload_ok="false"
           fi
           # If any error add this workload to the array
-          if test "${latency_ok}" = "false"
+          if test "${workload_ok}" = "false"
           then
-            latencies_array+=("${node}")
+            workloads_array+=("${node}")
           fi
         fi
       done
       # Return array
-      echo "${latencies_array[@]}"
+      echo "${workloads_array[@]}"
       ;;
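The case above feeds the retry loop in `fetch-logs`: a node's download only counts as complete when all three run files exist and no truncation was flagged. Schematically, with paths as used in this file:

    node_dir="$dir"/workloads/"$workload"/"$node"
    if   test -f "$node_dir"/exit_code \
      && test -f "$node_dir"/stdout \
      && test -f "$node_dir"/stderr \
      && ! test -f "$node_dir"/download_failed
    then echo complete
    else echo retry   # the node goes back into the returned array
    fi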
    # Array of nodes that don't have all the required log files in its directory
@@ -1502,8 +1523,8 @@ backend_nomad() {
  # Functions to start/stop groups of cluster "programs":
   # - start-tracers RUN-DIR
   # - start-nodes RUN-DIR
+  # - start-workloads RUN-DIR
   # - start-healthchecks RUN-DIR
-  # - start-latencies RUN-DIR
   ############################################################################
   # * Functions in the backend "interface" must use `fatal` when errors!
@@ -1613,47 +1634,52 @@ backend_nomad() {
       ;;

     # Called by `scenario.sh` with the exit trap (`scenario_setup_exit_trap`) set!
-    start-healthchecks )
+    start-workloads )
       local usage="USAGE: wb backend $op RUN-DIR"
       local dir=${1:?$usage}; shift
-      local jobs_array=()
-      local nodes=($(jq_tolist keys "$dir"/node-specs.json))
-      for node in ${nodes[*]}
+      # For every workload
+      for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
       do
-        backend_nomad start-healthcheck "${dir}" "${node}" &
-        jobs_array+=("$!")
-      done
-      # Wait and check!
-      if test -n "${jobs_array}"
-      then
-        if ! wait_kill_em_all "${jobs_array[@]}"
+        local jobs_array=()
+        # Workload may or may not run something in all producers.
+        local nodes=($(jq_tolist 'map(select(.isProducer) | .name)' "$dir"/node-specs.json))
+        for node in ${nodes[*]}
+        do
+          backend_nomad start-workload "${dir}" "${workload}" "${node}" &
+          jobs_array+=("$!")
+        done
+        # Wait and check!
+        if test -n "${jobs_array}"
         then
-          fatal "Failed to start healthcheck(s)"
-          return 1
-        else
-          for node in ${nodes[*]}
-          do
-            if ! test -f "${dir}"/healthcheck/"${node}"/started
-            then
-              fatal "Healthcheck for \"${node}\" failed to start!"
-            fi
-          done
+          if ! wait_kill_em_all "${jobs_array[@]}"
+          then
+            fatal "Failed to start workload(s)"
+            return 1
+          else
+            for node in ${nodes[*]}
+            do
+              if ! test -f "${dir}"/workloads/"${workload}"/"${node}"/started
+              then
+                fatal "Workload \"${workload}\" for \"${node}\" failed to start!"
+              fi
+            done
+          fi
         fi
-      fi
+      done
       return 0
       ;;

     # Called by `scenario.sh` with the exit trap (`scenario_setup_exit_trap`) set!
-    start-latencies )
+    start-healthchecks )
       local usage="USAGE: wb backend $op RUN-DIR"
       local dir=${1:?$usage}; shift
+
       local jobs_array=()
-      # explorer node is ignored, it will ping every other node.
-      local nodes=($(jq_tolist 'map(select(.isProducer) | .name)' "$dir"/node-specs.json))
+      local nodes=($(jq_tolist keys "$dir"/node-specs.json))
       for node in ${nodes[*]}
       do
-        backend_nomad start-latency "${dir}" "${node}" &
+        backend_nomad start-healthcheck "${dir}" "${node}" &
         jobs_array+=("$!")
       done
       # Wait and check!
@@ -1661,14 +1687,14 @@ backend_nomad() {
       then
         if ! wait_kill_em_all "${jobs_array[@]}"
         then
-          fatal "Failed to start latency(ies)"
+          fatal "Failed to start healthcheck(s)"
           return 1
         else
           for node in ${nodes[*]}
           do
-            if ! test -f "${dir}"/latency/"${node}"/started
+            if ! test -f "${dir}"/healthcheck/"${node}"/started
             then
-              fatal "Latency for \"${node}\" failed to start!"
+              fatal "Healthcheck for \"${node}\" failed to start!"
             fi
           done
         fi
@@ -1681,17 +1707,17 @@ backend_nomad() {
   # - start-tracer RUN-DIR (Nomad backend specific subcommand)
   # - start-node RUN-DIR NODE-NAME
   # - start-generator RUN-DIR
+  # - start-workload RUN-DIR WORKLOAD-NAME TASK-NAME (Nomad backend .....)
   # - start-healthcheck RUN-DIR TASK-NAME (Nomad backend specific subcommand)
-  # - start-latency RUN-DIR TASK-NAME (Nomad backend specific subcommand)
   # - wait-tracer RUN-DIR TASK-NAME (Nomad backend specific subcommand)
   # - wait-node RUN-DIR NODE_NAME (Nomad backend specific subcommand)
   # - stop-node RUN-DIR NODE-NAME
   #
   # TODO: They are up here as "stop-cluster-*"
   # - stop-generator RUN-DIR TASK-NAME (Nomad backend specific subcommand)
+  # - stop-workload RUN-DIR WORKLOAD-NAME TASK-NAME (Nomad backend .....)
   # - stop-tracer RUN-DIR TASK-NAME (Nomad backend specific subcommand)
   # - stop-healthcheck RUN-DIR TASK-NAME (Nomad backend specific subcommand)
-  # - stop-latency RUN-DIR TASK-NAME (Nomad backend specific subcommand)
   ############################################################################
   # * Functions in the backend "interface" must use `fatal` when errors!
@@ -1950,18 +1976,19 @@ backend_nomad() {
       fi
       ;;

-    # Called by "start-healthchecks" that has no exit trap, don't use fatal here!
-    start-healthcheck ) # Nomad backend specific subcommands
-      local usage="USAGE: wb backend $op RUN-DIR TASK"
+    # Called by "start-workloads" that has no exit trap, don't use fatal here!
+    start-workload ) # Nomad backend specific subcommands
+      local usage="USAGE: wb backend $op RUN-DIR WORKLOAD-NAME TASK"
       local dir=${1:?$usage}; shift
+      local workload=${1:?$usage}; shift
       local task=${1:?$usage}; shift
-      if ! backend_nomad task-program-start "${dir}" "${task}" healthcheck
+      if ! backend_nomad task-program-start "${dir}" "${task}" "${workload}"
       then
-        msg "$(red "FATAL: Program \"healthcheck\" inside Nomad Task \"${task}\" startup failed")"
+        msg "$(red "FATAL: Program \"${workload}\" workload inside Nomad Task \"${task}\" startup failed")"
         # TODO: Let the download fail when everything fails?
         backend_nomad download-logs-entrypoint "${dir}" "${task}" || true
-        backend_nomad download-logs-healthcheck "${dir}" "${task}" || true
+        backend_nomad download-logs-workload "${dir}" "${workload}" "${task}" || true
         # Should show the output/log of `supervisord` (runs as "entrypoint").
         msg "$(yellow "${dir}/nomad/${task}/stdout:")"
         cat \
           <(echo "-------------------- log start --------------------") \
           "${dir}"/nomad/"${task}"/stdout \
           <(echo "-------------------- log end --------------------")
@@ -1974,57 +2001,57 @@ backend_nomad() {
           "${dir}"/nomad/"${task}"/stderr \
           <(echo "-------------------- log end --------------------")
         # Depending on when the start command failed, logs may not be available!
-        if test -f "${dir}"/healthcheck/"${task}"/stdout
+        if test -f "${dir}"/workloads/"${workload}"/"${task}"/stdout
         then
-          msg "$(yellow "${dir}/healthcheck/${task}/stdout:")"
+          msg "$(yellow "${dir}/workloads/${workload}/${task}/stdout:")"
           cat \
             <(echo "-------------------- log start --------------------") \
-            "${dir}"/healthcheck/"${task}"/stdout \
+            "${dir}"/workloads/"${workload}"/"${task}"/stdout \
             <(echo "-------------------- log end --------------------")
         fi
         # Depending on when the start command failed, logs may not be available!
-        if test -f "${dir}"/healthcheck/"${task}"/stderr
+        if test -f "${dir}"/workloads/"${workload}"/"${task}"/stderr
         then
-          msg "$(yellow "${dir}/healthcheck/${task}/stderr:")"
+          msg "$(yellow "${dir}/workloads/${workload}/${task}/stderr:")"
           cat \
             <(echo "-------------------- log start --------------------") \
-            "${dir}"/healthcheck/"${task}"/stderr \
+            "${dir}"/workloads/"${workload}"/"${task}"/stderr \
             <(echo "-------------------- log end --------------------")
         fi
         # Let "start" parse the response code and handle the cleanup!
-        msg "$(red "Failed to start program \"healthcheck\" inside Nomad Task \"${task}\"")"
+        msg "$(red "Failed to start program \"${workload}\" workload inside Nomad Task \"${task}\"")"
         return 1
       else
         local nomad_environment=$(envjqr 'nomad_environment')
         if test "${nomad_environment}" != "cloud"
         then
-          ln -s \
-            ../../nomad/alloc/"${task}"/local/run/current/healthcheck/stdout \
-            "${dir}"/healthcheck/"${task}"/stdout
-          ln -s \
-            ../../nomad/alloc/"${task}"/local/run/current/healthcheck/stderr \
-            "${dir}"/healthcheck/"${task}"/stderr
-          ln -s \
-            ../../nomad/alloc/"${task}"/local/run/current/healthcheck/exit_code \
-            "${dir}"/healthcheck/"${task}"/exit_code
+          ln -s \
+            ../../../nomad/alloc/"${task}"/local/run/current/workloads/"${workload}"/stdout \
+            "${dir}"/workloads/"${workload}"/"${task}"/stdout
+          ln -s \
+            ../../../nomad/alloc/"${task}"/local/run/current/workloads/"${workload}"/stderr \
+            "${dir}"/workloads/"${workload}"/"${task}"/stderr
+          ln -s \
+            ../../../nomad/alloc/"${task}"/local/run/current/workloads/"${workload}"/exit_code \
+            "${dir}"/workloads/"${workload}"/"${task}"/exit_code
         fi
         # It was "intentionally started and should not automagically stop" flag!
-        touch "${dir}"/healthcheck/"${task}"/started
+        touch "${dir}"/workloads/"${workload}"/"${task}"/started
       fi
       ;;

-    # Called by "start-latencies" that has no exit trap, don't use fatal here!
-    start-latency ) # Nomad backend specific subcommands
+    # Called by "start-healthchecks" that has no exit trap, don't use fatal here!
+    start-healthcheck ) # Nomad backend specific subcommands
       local usage="USAGE: wb backend $op RUN-DIR TASK"
       local dir=${1:?$usage}; shift
       local task=${1:?$usage}; shift
-      if ! backend_nomad task-program-start "${dir}" "${task}" latency
+      if ! backend_nomad task-program-start "${dir}" "${task}" healthcheck
       then
-        msg "$(red "FATAL: Program \"latency\" inside Nomad Task \"${task}\" startup failed")"
+        msg "$(red "FATAL: Program \"healthcheck\" inside Nomad Task \"${task}\" startup failed")"
         # TODO: Let the download fail when everything fails?
         backend_nomad download-logs-entrypoint "${dir}" "${task}" || true
-        backend_nomad download-logs-latency "${dir}" "${task}" || true
+        backend_nomad download-logs-healthcheck "${dir}" "${task}" || true
         # Should show the output/log of `supervisord` (runs as "entrypoint").
         msg "$(yellow "${dir}/nomad/${task}/stdout:")"
         cat \
           <(echo "-------------------- log start --------------------") \
           "${dir}"/nomad/"${task}"/stdout \
           <(echo "-------------------- log end --------------------")
@@ -2037,42 +2064,42 @@ backend_nomad() {
           "${dir}"/nomad/"${task}"/stderr \
           <(echo "-------------------- log end --------------------")
         # Depending on when the start command failed, logs may not be available!
-        if test -f "${dir}"/latency/"${task}"/stdout
+        if test -f "${dir}"/healthcheck/"${task}"/stdout
         then
-          msg "$(yellow "${dir}/latency/${task}/stdout:")"
+          msg "$(yellow "${dir}/healthcheck/${task}/stdout:")"
           cat \
             <(echo "-------------------- log start --------------------") \
-            "${dir}"/latency/"${task}"/stdout \
+            "${dir}"/healthcheck/"${task}"/stdout \
            <(echo "-------------------- log end --------------------")
         fi
         # Depending on when the start command failed, logs may not be available!
-        if test -f "${dir}"/latency/"${task}"/stderr
+        if test -f "${dir}"/healthcheck/"${task}"/stderr
         then
-          msg "$(yellow "${dir}/latency/${task}/stderr:")"
+          msg "$(yellow "${dir}/healthcheck/${task}/stderr:")"
           cat \
             <(echo "-------------------- log start --------------------") \
-            "${dir}"/latency/"${task}"/stderr \
+            "${dir}"/healthcheck/"${task}"/stderr \
             <(echo "-------------------- log end --------------------")
         fi
         # Let "start" parse the response code and handle the cleanup!
-        msg "$(red "Failed to start program \"latency\" inside Nomad Task \"${task}\"")"
+        msg "$(red "Failed to start program \"healthcheck\" inside Nomad Task \"${task}\"")"
         return 1
       else
         local nomad_environment=$(envjqr 'nomad_environment')
         if test "${nomad_environment}" != "cloud"
         then
           ln -s \
-            ../../nomad/alloc/"${task}"/local/run/current/latency/stdout \
-            "${dir}"/latency/"${task}"/stdout
+            ../../nomad/alloc/"${task}"/local/run/current/healthcheck/stdout \
+            "${dir}"/healthcheck/"${task}"/stdout
           ln -s \
-            ../../nomad/alloc/"${task}"/local/run/current/latency/stderr \
-            "${dir}"/latency/"${task}"/stderr
+            ../../nomad/alloc/"${task}"/local/run/current/healthcheck/stderr \
+            "${dir}"/healthcheck/"${task}"/stderr
           ln -s \
-            ../../nomad/alloc/"${task}"/local/run/current/latency/exit_code \
-            "${dir}"/latency/"${task}"/exit_code
+            ../../nomad/alloc/"${task}"/local/run/current/healthcheck/exit_code \
+            "${dir}"/healthcheck/"${task}"/exit_code
         fi
         # It was "intentionally started and should not automagically stop" flag!
-        touch "${dir}"/latency/"${task}"/started
+        touch "${dir}"/healthcheck/"${task}"/started
       fi
       ;;
@@ -2169,7 +2196,7 @@ backend_nomad() {
   # - get-node-socket-path RUN-DIR NODE-NAME (Will break when cloud running)
   # - wait-node-stopped RUN-DIR NODE-NAME
   # - wait-pools-stopped RUN-DIR
-  # - wait-latencies-stopped RUN-DIR
+  # - wait-workloads-stopped RUN-DIR
   # - cluster-exited-programs RUN-DIR (Nomad backend specific subcommand)
   ############################################################################
   # * Functions in the backend "interface" must use `fatal` when errors!
@@ -2320,41 +2347,44 @@ backend_nomad() {
       fi
       ;;

-    wait-latencies-stopped )
+    wait-workloads-stopped )
       local usage="USAGE: wb backend $op SLEEP-SECONDS RUN-DIR"
       # This parameters is added by the nomad backend being used.
       local sleep_seconds=${1:?$usage}; shift
       local dir=${1:?$usage}; shift
       local start_time=$(date +%s)
-      # explorer node is ignored, it will ping every other node.
-      local nodes=($(jq_tolist 'map(select(.isProducer) | .name)' "${dir}"/node-specs.json))
-      msg_ne "nomad: $(blue Waiting) until all latency services are stopped: 000000"
-      for node in ${nodes[*]}
+      msg_ne "nomad: $(blue Waiting) until all workloads are stopped: 000000"
+      for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
       do
-        while \
-          ! test -f "${dir}"/flag/cluster-stopping \
-          && \
-          backend_nomad is-task-program-running "${dir}" "${node}" "latency" 5 > /dev/null
+        # Workload may or may not run something in all producers.
+        local nodes=($(jq_tolist 'map(select(.isProducer) | .name)' "${dir}"/node-specs.json))
+        for node in ${nodes[*]}
         do
+          while \
+            ! test -f "${dir}"/flag/cluster-stopping \
+            && \
+            backend_nomad is-task-program-running "${dir}" "${node}" "${workload}" 5 > /dev/null
+          do
+            local elapsed="$(($(date +%s) - start_time))"
+            echo -ne "\b\b\b\b\b\b"
+            printf "%6d" "${elapsed}"
+            # This time is different between local and cloud backends to avoid
+            # unnecessary Nomad specific traffic and at the same time be less
+            # sensitive to network failures.
+            sleep "${sleep_seconds}"
+          done # While
+          if ! test -f "${dir}"/flag/cluster-stopping
+          then
+            echo -ne "\n"
+            msg "$(yellow "supervisord program \"${workload}\" workload of \"${node}\" stopped")"
+            msg_ne "nomad: $(blue Waiting) until all workloads are stopped: 000000"
+          fi
           local elapsed="$(($(date +%s) - start_time))"
           echo -ne "\b\b\b\b\b\b"
           printf "%6d" "${elapsed}"
-          # This time is different between local and cloud backends to avoid
-          # unnecesary Nomad specific traffic and at the same time be less
-          # sensitive to network failures.
-          sleep "${sleep_seconds}"
-        done # While
-        if ! test -f "${dir}"/flag/cluster-stopping
-        then
-          echo -ne "\n"
-          msg "$(yellow "supervisord program \"latency\" of \"${node}\" stopped")"
-          msg_ne "nomad: $(blue Waiting) until all pool nodes are stopped: 000000"
-        fi
-        local elapsed="$(($(date +%s) - start_time))"
-        echo -ne "\b\b\b\b\b\b"
-        printf "%6d" "${elapsed}"
-      done >&2 # For
+        done >&2 # For
+      done

       echo -ne "\b\b\b\b\b\b"
       local elapsed=$(($(date +%s) - start_time))
@@ -2365,7 +2395,7 @@ backend_nomad() {
       else
         touch "${dir}"/flag/cluster-stopping
         echo -ne "\n"
-        msg "All latency services exited -- after $(yellow ${elapsed})s"
+        msg "All workloads exited -- after $(yellow ${elapsed})s"
       fi
       ;;
@@ -2412,7 +2442,7 @@ backend_nomad() {
       ;;

     # For debugging when something fails, downloads and prints details!
-    download-logs-latency )
+    download-logs-healthcheck )
       local usage="USAGE: wb backend pass $op RUN-DIR TASK-NAME"
       local dir=${1:?$usage}; shift
       local task=${1:?$usage}; shift
@@ -2421,63 +2451,64 @@ backend_nomad() {
       local download_ok="true"
       # Remove "live" symlinks before downloading the "originals"
       local nomad_environment=$(envjqr 'nomad_environment')
       if test "${nomad_environment}" != "cloud"
       then
-        rm -f "${dir}"/latency/"${task}"/{stdout,stderr,exit_code}
+        rm -f "${dir}"/healthcheck/"${task}"/{stdout,stderr,exit_code}
       fi
       # Downloads "exit_code", "stdout", "stderr" and GHC files.
       # Depending on when the start command failed, logs may not be available!
-      backend_nomad download-zstd-latency "${dir}" "${task}" \
+      backend_nomad download-zstd-healthcheck "${dir}" "${task}" \
       || download_ok="false"
       # Return
       if test "${download_ok}" = "false"
       then
-        msg "$(red "Failed to download \"latency\" run files from \"${task}\"")"
+        msg "$(red "Failed to download \"healthcheck\" run files from \"${task}\"")"
        # Below like errors can end in truncated files, a proper flag is needed!
         # failed to exec into task: read tcp 10.0.0.115:33840->3.72.231.105:443: read: connection reset by peer
         # tar: Unexpected EOF in archive
         # tar: Unexpected EOF in archive
         # tar: Error is not recoverable: exiting now
-        touch "${dir}"/latency/"${task}"/download_failed
+        touch "${dir}"/healthcheck/"${task}"/download_failed
         return 1
       else
-        if test -f "${dir}"/latency/"${task}"/download_failed
+        if test -f "${dir}"/healthcheck/"${task}"/download_failed
         then
-          rm "${dir}"/latency/"${task}"/download_failed
+          rm "${dir}"/healthcheck/"${task}"/download_failed
         fi
         return 0
       fi
       ;;
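A note on the empty marker files these cases juggle (summarizing the code, not new behavior): each program directory under the run dir may carry `started`, `stopped`, `quit` and `download_failed` flags, and the stop/fetch logic branches on them. The "is it still ours to stop?" predicate used by the `stop-all-*` cases reduces to:

    task_dir="$dir"/workloads/"$workload"/"$task"   # healthcheck/ et al. work alike
    test -f "$task_dir"/started \
      && ! test -f "$task_dir"/stopped \
      && ! test -f "$task_dir"/quit \
      && echo "still running as far as the backend knows"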
     # For debugging when something fails, downloads and prints details!
-    download-logs-healthcheck )
-      local usage="USAGE: wb backend pass $op RUN-DIR TASK-NAME"
+    download-logs-workload )
+      local usage="USAGE: wb backend pass $op RUN-DIR WORKLOAD-NAME TASK-NAME"
       local dir=${1:?$usage}; shift
+      local workload=${1:?$usage}; shift
       local task=${1:?$usage}; shift
       local download_ok="true"
       # Remove "live" symlinks before downloading the "originals"
       local nomad_environment=$(envjqr 'nomad_environment')
       if test "${nomad_environment}" != "cloud"
       then
-        rm -f "${dir}"/healthcheck/"${task}"/{stdout,stderr,exit_code}
+        rm -f "${dir}"/workloads/"${workload}"/"${task}"/{stdout,stderr,exit_code}
       fi
       # Downloads "exit_code", "stdout", "stderr" and GHC files.
       # Depending on when the start command failed, logs may not be available!
-      backend_nomad download-zstd-healthcheck "${dir}" "${task}" \
+      backend_nomad download-zstd-workload "${dir}" "${workload}" "${task}" \
       || download_ok="false"
       # Return
       if test "${download_ok}" = "false"
       then
-        msg "$(red "Failed to download \"healthcheck\" run files from \"${task}\"")"
+        msg "$(red "Failed to download \"${workload}\" workload run files from \"${task}\"")"
         # Below like errors can end in truncated files, a proper flag is needed!
         # failed to exec into task: read tcp 10.0.0.115:33840->3.72.231.105:443: read: connection reset by peer
         # tar: Unexpected EOF in archive
         # tar: Unexpected EOF in archive
         # tar: Error is not recoverable: exiting now
-        touch "${dir}"/healthcheck/"${task}"/download_failed
+        touch "${dir}"/workloads/"${workload}"/"${task}"/download_failed
         return 1
       else
-        if test -f "${dir}"/healthcheck/"${task}"/download_failed
+        if test -f "${dir}"/workloads/"${workload}"/"${task}"/download_failed
         then
-          rm "${dir}"/healthcheck/"${task}"/download_failed
+          rm "${dir}"/workloads/"${workload}"/"${task}"/download_failed
         fi
         return 0
       fi
@@ -2662,31 +2693,32 @@ backend_nomad() {
       fi
       ;;

-    download-zstd-latency )
+    download-zstd-healthcheck )
       local usage="USAGE: wb backend pass $op RUN-DIR TASK-NAME"
       local dir=${1:?$usage}; shift
       local task=${1:?$usage}; shift
-      msg "$(blue Fetching) $(yellow "\"latency\"") run files from Nomad $(yellow "Task \"${task}\"") ..."
+      msg "$(blue Fetching) $(yellow "\"healthcheck\"") run files from Nomad $(yellow "Task \"${task}\"") ..."
       # TODO: Add compression, either "--zstd" or "--xz"
       backend_nomad task-exec-program-run-files-tar-zstd \
-        "${dir}" "${task}" "latency" \
+        "${dir}" "${task}" "healthcheck" \
       | tar --extract \
-        --directory="${dir}"/latency/"${task}"/ --file=- \
+        --directory="${dir}"/healthcheck/"${task}"/ --file=- \
         --no-same-owner --no-same-permissions
       ;;

-    download-zstd-healthcheck )
-      local usage="USAGE: wb backend pass $op RUN-DIR TASK-NAME"
+    download-zstd-workload )
+      local usage="USAGE: wb backend pass $op RUN-DIR WORKLOAD-NAME TASK-NAME"
       local dir=${1:?$usage}; shift
+      local workload=${1:?$usage}; shift
       local task=${1:?$usage}; shift
-      msg "$(blue Fetching) $(yellow "\"healthcheck\"") run files from Nomad $(yellow "Task \"${task}\"") ..."
+      msg "$(blue Fetching) $(yellow "\"${workload}\" workload") run files from Nomad $(yellow "Task \"${task}\"") ..."
       # TODO: Add compression, either "--zstd" or "--xz"
-      backend_nomad task-exec-program-run-files-tar-zstd \
-        "${dir}" "${task}" "healthcheck" \
-      | tar --extract \
-        --directory="${dir}"/healthcheck/"${task}"/ --file=- \
+      backend_nomad task-exec-program-run-files-tar-zstd \
+        "${dir}" "${task}" "workloads/${workload}" \
+      | tar --extract \
+        --directory="${dir}"/workloads/"${workload}"/"${task}"/ --file=- \
         --no-same-owner --no-same-permissions
       ;;
@@ -2814,6 +2846,16 @@ backend_nomad() {
       > "${dir}"/nomad/"${task}"/networking.json
       ;;

+    download-config-workload )
+      local usage="USAGE: wb backend pass $op RUN-DIR WORKLOAD-NAME NODE-NAME"
+      local dir=${1:?$usage}; shift
+      local workload=${1:?$usage}; shift
+      local node=${1:?$usage}; shift
+      backend_nomad task-file-contents "${dir}" "${node}" \
+        run/current/workloads/"${workload}"/start.sh \
+      > "${dir}"/workloads/"${workload}"/"${node}"/start.sh
+      ;;
+
     download-config-generator )
       local usage="USAGE: wb backend pass $op RUN-DIR"
       local dir=${1:?$usage}; shift
@@ -2881,15 +2923,6 @@ backend_nomad() {
       > "${dir}"/healthcheck/"${node}"/start.sh
       ;;

-    download-config-latency )
-      local usage="USAGE: wb backend pass $op RUN-DIR NODE-NAME"
-      local dir=${1:?$usage}; shift
-      local node=${1:?$usage}; shift
-      backend_nomad task-file-contents "${dir}" "${node}" \
-        run/current/latency/start.sh \
-      > "${dir}"/latency/"${node}"/start.sh
-      ;;
-
 ## Nomad Job's Tasks supervisord queries
 ########################################
diff --git a/nix/workbench/backend/nomad/cloud.sh b/nix/workbench/backend/nomad/cloud.sh
index 92e60244ea2..21c4efb5bfa 100644
--- a/nix/workbench/backend/nomad/cloud.sh
+++ b/nix/workbench/backend/nomad/cloud.sh
@@ -81,13 +81,13 @@ backend_nomadcloud() {
       backend_nomad wait-pools-stopped 60 "$@"
       ;;

-    wait-latencies-stopped )
+    wait-workloads-stopped )
       # It passes the sleep time (in seconds) required argument.
      # This time is different between local and cloud backends to avoid
       # unnecesary Nomad specific traffic (~99% happens waiting for node-0, the
       # first one it waits to stop inside a loop) and at the same time be less
       # sensitive to network failures.
-      backend_nomad wait-latencies-stopped 60 "$@"
+      backend_nomad wait-workloads-stopped 60 "$@"
       ;;

     fetch-logs )
@@ -146,12 +146,12 @@ backend_nomadcloud() {
       backend_nomad start-generator "$@"
       ;;

-    start-healthchecks )
-      backend_nomad start-healthchecks "$@"
+    start-workloads )
+      backend_nomad start-workloads "$@"
       ;;

-    start-latencies )
-      backend_nomad start-latencies "$@"
+    start-healthchecks )
+      backend_nomad start-healthchecks "$@"
       ;;

     start-node )
@@ -998,18 +998,6 @@ fetch-logs-ssh-node() {
   local ssh_config_path ssh_command
   ssh_config_path="$(wb nomad ssh config)"
   ssh_command="ssh -F ${ssh_config_path} -p 32000 -l nobody"
-  # Download latency(ies) logs. ################################################
-  ##############################################################################
-  msg "$(blue "Fetching") $(yellow "program \"latency\"") run files from $(yellow "\"${node}\" (\"${public_ipv4}\")") ..."
-  if ! rsync -e "${ssh_command}" -au \
-         -f'- start.sh' \
-         "${public_ipv4}":/local/run/current/latency/ \
-         "${dir}"/latency/"${node}"/
-  then
-    node_ok="false"
-    touch "${dir}"/nomad/"${node}"/download_failed
-    msg "$(red Error fetching) $(yellow "program \"latency\"") $(red "run files from") $(yellow "\"${node}\" (\"${public_ipv4}\")") ..."
-  fi
   # Download healthcheck(s) logs. ##############################################
   ##############################################################################
   msg "$(blue "Fetching") $(yellow "program \"healthcheck\"") run files from $(yellow "\"${node}\" (\"${public_ipv4}\")") ..."
   if ! rsync -e "${ssh_command}" -au \
          -f'- start.sh' \
          "${public_ipv4}":/local/run/current/healthcheck/ \
          "${dir}"/healthcheck/"${node}"/
   then
     node_ok="false"
     touch "${dir}"/nomad/"${node}"/download_failed
     msg "$(red Error fetching) $(yellow "program \"healthcheck\"") $(red "run files from") $(yellow "\"${node}\" (\"${public_ipv4}\")") ..."
   fi
+  # Download workload(s) logs. #################################################
+  ##############################################################################
+  # For every workload
+  for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
+  do
+    msg "$(blue "Fetching") $(yellow "program \"${workload}\" workload") run files from $(yellow "\"${node}\" (\"${public_ipv4}\")") ..."
+    if ! rsync -e "${ssh_command}" -au \
+           -f'- start.sh' \
+           "${public_ipv4}":/local/run/current/workloads/"${workload}"/ \
+           "${dir}"/workloads/"${workload}"/"${node}"/
+    then
+      node_ok="false"
+      touch "${dir}"/nomad/"${node}"/download_failed
+      msg "$(red Error fetching) $(yellow "program \"${workload}\" workload") $(red "run files from") $(yellow "\"${node}\" (\"${public_ipv4}\")") ..."
+    fi
+  done
   # Download generator logs. ###################################################
   ##############################################################################
   if test "${node}" = "explorer"
diff --git a/nix/workbench/backend/nomad/exec.sh b/nix/workbench/backend/nomad/exec.sh
index 38a21875f9e..5c4f61a4364 100644
--- a/nix/workbench/backend/nomad/exec.sh
+++ b/nix/workbench/backend/nomad/exec.sh
@@ -61,13 +61,13 @@ backend_nomadexec() {
       backend_nomad wait-pools-stopped 1 "$@"
       ;;

-    wait-latencies-stopped )
+    wait-workloads-stopped )
       # It passes the sleep time (in seconds) required argument.
       # This time is different between local and cloud backends to avoid
       # unnecesary Nomad specific traffic (~99% happens waiting for node-0, the
       # first one it waits to stop inside a loop) and at the same time be less
       # sensitive to network failures.
-      backend_nomad wait-latencies-stopped 1 "$@"
+      backend_nomad wait-workloads-stopped 1 "$@"
       ;;

     # All or clean up everything!
@@ -107,12 +107,12 @@ backend_nomadexec() {
       backend_nomad start-generator "$@"
       ;;

-    start-healthchecks )
-      backend_nomad start-healthchecks "$@"
+    start-workloads )
+      backend_nomad start-workloads "$@"
       ;;

-    start-latencies )
-      backend_nomad start-latencies "$@"
+    start-healthchecks )
+      backend_nomad start-healthchecks "$@"
       ;;

     start-node )
diff --git a/nix/workbench/backend/supervisor-conf.nix b/nix/workbench/backend/supervisor-conf.nix
index 052362e35e7..ccf2b3d4ec4 100644
--- a/nix/workbench/backend/supervisor-conf.nix
+++ b/nix/workbench/backend/supervisor-conf.nix
@@ -184,14 +184,18 @@ let
       startsecs = 5;
     };
   }
+
+
   //
-  {
-    "program:latency" = {
+  (builtins.listToAttrs (builtins.map (workload: {
+    name = "program:${workload.name}";
+    value = {
       # "command" below assumes "directory" is set accordingly.
-      directory = "${stateDir}/latency";
+      directory = "${stateDir}/workloads/${workload.name}";
       command = "${command}";
-      stdout_logfile = "${stateDir}/latency/stdout";
-      stderr_logfile = "${stateDir}/latency/stderr";
+      stdout_logfile = "${stateDir}/workloads/${workload.name}/stdout";
+      stderr_logfile = "${stateDir}/workloads/${workload.name}/stderr";
       # Set these values to 0 to indicate an unlimited log size / no rotation.
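For readers unfamiliar with supervisord configs: the `listToAttrs`/`map` pair above emits one `[program:...]` section per workload. A sketch of what it would render for a workload named `latency`, with `stateDir` assumed to be `run/current` and `command` left as the placeholder the surrounding Nix fills in:

    [program:latency]
    directory=run/current/workloads/latency
    command=<command>
    stdout_logfile=run/current/workloads/latency/stdout
    stderr_logfile=run/current/workloads/latency/stderr
    stdout_logfile_maxbytes=0
    stderr_logfile_maxbytes=0
    startsecs=5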
       stdout_logfile_maxbytes = 0;
       stderr_logfile_maxbytes = 0;
@@ -204,7 +208,10 @@ let
       # Seconds it needs to stay running to consider the start successful
       startsecs = 5;
     };
-  }
+  }) profileData.workloads))
+
+
   //
   lib.attrsets.optionalAttrs withSsh
   {
diff --git a/nix/workbench/backend/supervisor.sh b/nix/workbench/backend/supervisor.sh
index c9e08301140..43a4dfd9339 100755
--- a/nix/workbench/backend/supervisor.sh
+++ b/nix/workbench/backend/supervisor.sh
@@ -58,6 +58,7 @@ case "$op" in

     local svcs=$dir/profile/node-services.json
     local gtor=$dir/profile/generator-service.json
+    local work=$dir/profile/workloads-service.json
     local trac=$dir/profile/tracer-service.json
     local hche=$dir/profile/healthcheck-service.json
@@ -76,6 +77,15 @@ case "$op" in
     cp $(jq '."plutus-redeemer"' -r $gtor) "$gen_dir"/plutus-redeemer.json
     cp $(jq '."plutus-datum"' -r $gtor) "$gen_dir"/plutus-datum.json

+    local work_dir="$dir"/workloads
+    mkdir -p "$work_dir"
+    for workload in $(jq_tolist 'map(.name)' "$work")
+    do
+      mkdir -p "$work_dir"/"${workload}"
+      cp $(jq "map(select(.name == \"${workload}\"))[0] | .start" -r $work) \
+         "$work_dir"/"${workload}"/start.sh
+    done
+
     local trac_dir="$dir"/tracer
     mkdir -p "$trac_dir"
     cp $(jq '."start"' -r $trac) "$trac_dir"/start.sh
@@ -84,8 +94,6 @@ case "$op" in
     local hche_dir="$dir"/healthcheck
     mkdir -p "$hche_dir"
     cp $(jq '."start"' -r $hche) "$hche_dir"/start.sh
-
-    mkdir -p "$dir"/latency
     ;;

   deploy-genesis )
@@ -274,6 +282,30 @@ EOF
     fi
     backend_supervisor save-child-pids "$dir";;

+  start-workloads )
+    local usage="USAGE: wb backend $op RUN-DIR"
+    local dir=${1:?$usage}; shift
+
+    while test $# -gt 0
+    do case "$1" in
+         --* ) msg "FATAL: unknown flag '$1'"; usage_supervisor;;
+         * ) break;; esac; shift; done
+
+    # For every workload
+    for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
+    do
+      if ! supervisorctl start "${workload}"
+      then progress "supervisor" "$(red fatal: failed to start) $(white "${workload} workload")"
+           echo "$(red "${workload}" workload stdout) ----------------------" >&2
+           cat "$dir"/workloads/"${workload}"/stdout
+           echo "$(red "${workload}" workload stderr) ----------------------" >&2
+           cat "$dir"/workloads/"${workload}"/stderr
+           echo "$(white -------------------------------------------------)" >&2
+           fatal "could not start $(white "${workload} workload")"
+      fi
+    done
+    backend_supervisor save-child-pids "$dir";;
+
   wait-node-stopped )
     local usage="USAGE: wb backend $op RUN-DIR NODE"
     local dir=${1:?$usage}; shift
@@ -322,6 +354,40 @@ EOF
     fi
     ;;

+  wait-workloads-stopped )
+    local usage="USAGE: wb backend $op RUN-DIR"
+    local dir=${1:?$usage}; shift
+
+    local start_time=$(date +%s)
+    msg_ne "supervisor: waiting until all workloads are stopped: 000000"
+    for workload in $(jq_tolist '.workloads | map(.name)' "$dir"/profile.json)
+    do
+      while \
+        ! test -f "${dir}"/flag/cluster-stopping \
+        && \
+        supervisorctl status "${workload}" > /dev/null
+      do
+        echo -ne "\b\b\b\b\b\b"
+        printf "%6d" "$(($(date +%s) - start_time))"
+        sleep 1
+      done
+      if ! test -f "${dir}"/flag/cluster-stopping
+      then
+        echo -ne "\b\b\b\b\b\b"
+        echo -n "${workload} 000000"
+      fi
+    done >&2
+    echo -ne "\b\b\b\b\b\b"
+    local elapsed=$(($(date +%s) - start_time))
+    if test -f "${dir}"/flag/cluster-stopping
+    then
+      echo " Termination requested -- after $(yellow ${elapsed})s" >&2
+    else
+      touch "${dir}"/flag/cluster-stopping
+      echo " All workloads exited -- after $(yellow ${elapsed})s" >&2
+    fi
+    ;;
+
   stop-all )
     local usage="USAGE: wb backend $op RUN-DIR"
     local dir=${1:?$usage}; shift
diff --git a/nix/workbench/genesis/genesis.jq b/nix/workbench/genesis/genesis.jq
index 1c48f65406c..ef18c8d9004 100644
--- a/nix/workbench/genesis/genesis.jq
+++ b/nix/workbench/genesis/genesis.jq
@@ -8,7 +8,7 @@ def profile_cli_args($p):
 { createStakedArgs:
   ([ "--testnet-magic",          $p.genesis.network_magic
    , "--supply",                 fmt_decimal_10_5($p.genesis.funds_balance)
-   , "--gen-utxo-keys",          1
+   , "--gen-utxo-keys",          $p.genesis.utxo_keys
    , "--gen-genesis-keys",       $p.composition.n_bft_hosts
    , "--supply-delegated",       fmt_decimal_10_5($p.derived.supply_delegated)
    , "--gen-pools",              $p.composition.n_pools
@@ -23,7 +23,7 @@ def profile_cli_args($p):
 , createTestnetDataArgs:
   ([ "--testnet-magic",          $p.genesis.network_magic
    , "--total-supply",           fmt_decimal_10_5($p.genesis.funds_balance + $p.derived.supply_delegated)
-   , "--utxo-keys",              1
+   , "--utxo-keys",              $p.genesis.utxo_keys
    , "--genesis-keys",           $p.composition.n_bft_hosts
    , "--delegated-supply",       fmt_decimal_10_5($p.derived.supply_delegated)
    , "--pools",                  $p.composition.n_pools
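Both argument builders above now thread `genesis.utxo_keys` through instead of the hard-coded `1`; with the voting genesis variant below setting it to 2, the rendered argument list gains `--utxo-keys 2` (respectively `--gen-utxo-keys 2`). Schematically, for the create-testnet-data path (a sketch with illustrative values, not the verbatim command line the workbench builds):

    create-testnet-data \
      --testnet-magic    "$network_magic" \
      --total-supply     "$total_supply" \
      --utxo-keys        2 \
      --genesis-keys     "$n_bft_hosts" \
      --delegated-supply "$supply_delegated" \
      --pools            "$n_pools"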
diff --git a/nix/workbench/genesis/genesis.sh b/nix/workbench/genesis/genesis.sh
index 7c42cf9f860..fc06076c13c 100644
--- a/nix/workbench/genesis/genesis.sh
+++ b/nix/workbench/genesis/genesis.sh
@@ -734,11 +734,34 @@ genesis-create-testnet-data() {
   mkdir -p "$dir/utxo-keys"
   link_keys utxo-keys utxo-keys

-  info genesis "removing delegator keys."
-  rm "$dir/stake-delegators" -rf
-
-  info genesis "removing dreps keys."
-  rm "$dir"/drep-keys -rf
+  local is_voting
+  is_voting=$(jq --raw-output '.workloads | any( .name == "voting")' "$profile_json")
+  if [[ "$is_voting" == "true" ]];
+  then
+    info genesis "voting workload specified - keeping one stake key per producer"
+    mv "$dir/stake-delegators" "$dir/stake-delegators.bak"
+    mkdir "$dir/stake-delegators"
+    local pools
+    pools="$(jq --raw-output '.composition.n_pools' "${profile_json}")"
+    for i in $(seq 1 "$pools")
+    do
+      if test -d "$dir/stake-delegators.bak/delegator${i}"
+      then
+        local from_dir to_dir
+        from_dir="$dir/stake-delegators.bak/delegator${i}"
+        to_dir="$dir/stake-delegators/delegator$((i - 1))"
+        mkdir "$to_dir"
+        cp "$from_dir"/{payment,staking}.{skey,vkey} "$to_dir"/
+      fi
+    done
+    rm "$dir/stake-delegators.bak" -rf
+    info genesis "voting workload specified - skipping deletion of DRep keys"
+  else
+    info genesis "removing delegator keys."
+    rm "$dir/stake-delegators" -rf
+    info genesis "removing dreps keys."
+    rm "$dir"/drep-keys -rf
+  fi

   info genesis "moving keys"
   Massage_the_key_file_layout_to_match_AWS "$profile_json" "$node_specs" "$dir"
diff --git a/nix/workbench/genesis/guardrails-script.plutus b/nix/workbench/genesis/guardrails-script.plutus
new file mode 100644
index 00000000000..46ab9559eb6
--- /dev/null
+++ b/nix/workbench/genesis/guardrails-script.plutus
@@ -0,0 +1,5 @@
+{
+    "type": "PlutusScriptV3",
+    "description": "*BE CAREFUL* that this is compiled from a release commit of plutus and not from master",
+    "cborHex": "5908545908510101003232323232323232323232323232323232323232323232323232323232323232323232323232323232259323255333573466e1d20000011180098111bab357426ae88d55cf00104554ccd5cd19b87480100044600422c6aae74004dd51aba1357446ae88d55cf1baa3255333573466e1d200a35573a002226ae84d5d11aab9e00111637546ae84d5d11aba235573c6ea800642b26006003149a2c8a4c301f801c0052000c00e0070018016006901e406cc00e003000c00d20d00fc000c0003003800a4005801c00e003002c00d20c09a0c80d9801c006001801a4101b5881380018000600700148013003801c006005801a410100078001801c006001801a4101001f8001800060070014801b0038018096007001800600690404002600060001801c0052008c00e006025801c006001801a41209d8001800060070014802b003801c006005801a410112f501b3003800c00300348202b7881300030000c00e00290066007003800c00b003482032ad7b806036403060070014803b00380180960003003800a4021801c00e003002c00d20f40380d9801c006001801a41403f800100a0c00e0029009600f0030078040c00e002900a600f003800c00b003301c483403e01a600700180060066038904801e00060001801c0052016c01e00600f801c006001801980ca402900e30000c00e002901060070030128060c00e00290116007003800c00b003483c0ba03660070018006006906432e00040283003800a40498003003800a404d802c00e00f003800c00b003301c480cb0003003800c003003301c4802b00030001801c01e0070018016006603890605c0160006007001800600660389048276000600030000c00e0029014600b003801c00c04b003800c00300348203a2489b00030001801c00e006025801c006001801a4101b11dc2df80018000c0003003800a4055802c00e007003012c00e003000c00d2080b8b872c000c0006007003801809600700180060069040607e4155016000600030000c00e00290166007003012c00e003000c00d2080c001c000c0003003800a405d801c00e003002c00d20c80180d9801c006001801a412007800100a0c00e00290186007003014c0006007001480cb0058018016006007801801e00600300403003800a4069802c00c00b003003c00c00f003803c00e003002c00c03f00333023480692028c0004014c00c007003002c00c00b003002c00e00f003800c00b00300f80590052008003003800a406d801c00e003002c00d2000c00d2006c00060070018006006900a600060001801c0052038c00e007001801600690006006901260003003800c003003483281300020141801c005203ac00e006029801c006001801a403d800180006007001480f3003801804e00700180060069040404af3c4e302600060001801c005203ec00e006013801c006001801a4101416f0fd20b80018000600700148103003801c006005801a403501b3003800c0030034812b00030000c00e0029021600f003800c00a01ac00e003000c00ccc08d20d00f4800b00030000c0000000000803c00c017003800c003003014c00c04b00018000803c00c013003800c00300301380498000803c00c00e004400e00f003800c00b00300bc000802180020070018006006021801808e00030004006005801804e0060158000800c00b00330154805200c400e00300080330004006005801a4001801a410112f58000801c00600901160008019807240118002007001800600690404a75ee01e00060008018026000801803e000300d48010c03520c80130074800a0030028048c011200a800c00b0034800b0000c01d2002300448050c0312008300b48000c029200630094804a00690006000300748008c0192066300a2233335573e00250002801994004d55ce800cd55cf0008d5d08014c00cd5d10011263009222532900389800a4d2219002912c80344c01526910c80148964cc04cdd68010034564cc03801400626601800e0071801226601800e01518010096400a3000910c008600444002600244004a664600200244246466004460044460040064600444600200646a660080080066a00600224446600644b20051800484ccc02600244666ae68cdc3801000c00200500a91199ab9a33710004003000801488ccd5cd19b89002001800400a44666ae68cdc4801000c00a00122333573466e20008006005000912a999ab9a3371200400222002220052255333573466e2400800444008440040026eb400a42660080026eb000a4264666015001229002914801c8954ccd5cd19b8700400211333573466e1c00c006001002118011229002914801c88cc044cdc100200099b82002003245200522900391199ab9a3371066e08010004cdc1001001c002004403245200522900391199ab9a3371266e08010004cdc1001001c00a00048a400a45200722333573466e20cdc100200099b820020038014000912c99807001000c40062004912c99807001000c400a2002001199919ab9a357466ae880048cc028dd69aba1003375a6ae84008d5d1000934000dd60010a40064666ae68d5d1800c0020052225933006003357420031330050023574400318010600a444aa666ae68cdc3a400000222c22aa666ae68cdc4000a4000226600666e05200000233702900000088994004cdc2001800ccdc20010008cc010008004c01088954ccd5cd19b87480000044400844cc00c004cdc300100091119803112c800c60012219002911919806912c800c4c02401a442b26600a004019130040018c008002590028c804c8888888800d1900991111111002a244b267201722222222008001000c600518000001112a999ab9a3370e004002230001155333573466e240080044600823002229002914801c88ccd5cd19b893370400800266e0800800e00100208c8c0040048c0088cc008008005"
+}
diff --git a/nix/workbench/modules/genesis.nix b/nix/workbench/modules/genesis.nix
index cc5d774336c..70088938ce8 100644
--- a/nix/workbench/modules/genesis.nix
+++ b/nix/workbench/modules/genesis.nix
@@ -292,7 +292,7 @@ in
   create-staked-args = concatStringsSep " " ([
     "--supply ${toString genesis.funds_balance}"
-    "--gen-utxo-keys 1"
+    "--gen-utxo-keys ${toString genesis.utxo_keys}"
     "--gen-genesis-keys ${toString composition.n_bft_hosts}"
     "--supply-delegated ${toString derived.supply_delegated}"
     "--gen-pools ${toString composition.n_pools}"
@@ -307,7 +307,7 @@ in
   create-testnet-data-args = concatStringsSep " " [
     "--total-supply ${toString genesis.total_supply}"
-    "--utxo-keys 1"
+    "--utxo-keys ${toString genesis.utxo_keys}"
     "--genesis-keys ${toString composition.n_bft_hosts}"
     "--delegated-supply ${toString derived.supply_delegated}"
     "--pools ${toString composition.n_pools}"
diff --git a/nix/workbench/profile/pparams/delta-voting.jq b/nix/workbench/profile/pparams/delta-voting.jq
new file mode 100644
index 00000000000..63622621fad
--- /dev/null
+++ b/nix/workbench/profile/pparams/delta-voting.jq
@@ -0,0 +1,6 @@
+def delta_voting:
+{
+  "conway": {
+    "govActionDeposit": 100000
+  }
+};
diff --git a/nix/workbench/profile/prof0-defaults.jq b/nix/workbench/profile/prof0-defaults.jq
index 2aae1263592..b9e5e6e29fd 100644
--- a/nix/workbench/profile/prof0-defaults.jq
+++ b/nix/workbench/profile/prof0-defaults.jq
@@ -31,6 +31,7 @@ def era_defaults($era):
     ## UTxO & delegation
     , per_pool_balance:                  1000000000000000
     , funds_balance:                     10000000000000
+    , utxo_keys:                         1
     , utxo:                              0

     ## DReps
@@ -62,6 +63,8 @@ def era_defaults($era):
       }
     }

+  , workloads: []
+
   , node:
     { rts_flags_override:                []
     , heap_limit:                        null ## optional: heap limit in MB (translates to RTS flag -M)
$plutus_base + | + ({ extra_desc: "with DRep voting workload" + , generator: + { inputs_per_tx: 1 + , outputs_per_tx: 1 + } + }) as $voting_base | ({ generator: { plutus: @@ -537,6 +544,27 @@ def all_profile_variants: | .generator.tx_fee = 940000 ) as $plutus_loop_ripemd ## + ### Definition vocabulary: custom workloads + ## + | + ({ name: "latency" + , parameters: {} + , entrypoints: { + pre_generator: null + , producers: "latency" + } + , wait_pools: false + }) as $latency_workload + | + ({ name: "voting" + , parameters: {} + , entrypoints: { + pre_generator: "workflow_generator" + , producers: "workflow_producer" + } + , wait_pools: true + }) as $voting_workload + ## ### Definition vocabulary: genesis variants ## | @@ -586,6 +614,13 @@ def all_profile_variants: ({} | .genesis.pparamsOverlays = ["v10-preview"] ) as $costmodel_v10_preview + | + ($genesis_voltaire + | .genesis.pparamsOverlays as $ovls + | .genesis.pparamsOverlays = $ovls + ["v10-preview", "voting"] + | .genesis.utxo_keys = 2 + | .genesis.funds_balance = 40000000000000 + ) as $genesis_voting ## ### Definition vocabulary: node + tracer config variants ## @@ -701,9 +736,6 @@ def all_profile_variants: | ({ scenario: "tracer-only" }) as $scenario_tracer_only - | - ({ scenario: "latency" - }) as $scenario_latency | ## ### Definition vocabulary: base variant @@ -815,12 +847,14 @@ def all_profile_variants: } }) as $nomad_perf_plutussecp_base | - ($scenario_latency * $compose_fiftytwo * $dataset_empty * $no_filtering * + ($compose_fiftytwo * $dataset_empty * $no_filtering * { desc: "AWS perf class cluster, stop when all latency services stop" + , workloads: [ $latency_workload ] }) as $nomad_perf_latency_base | - ($scenario_latency * $compose_fiftytwo * $dataset_empty * $no_filtering * + ($compose_fiftytwo * $dataset_empty * $no_filtering * { desc: "AWS perf-ssd class cluster, stop when all latency services stop" + , workloads: [ $latency_workload ] }) as $nomad_perfssd_latency_base | ($scenario_nomad_perfssd_solo * $solo * $dataset_24m * @@ -936,10 +970,20 @@ def all_profile_variants: ($nomad_perf_base * $nomad_perf_dense * $p2p * $genesis_voltaire ) as $valuevolt_nomadperf_template | + # P&T Nomad cluster: 52 nodes, P2P by default - value+voting workload + # Extra splits, benchmarking from 5th epoch (skip 0,1,2,3 / 533 min / 8.88 hs) + ($nomad_perf_base * $nomad_perf_dense * $p2p * $genesis_voting * + {analysis: { filters: ["epoch5+", "size-full"] } } + ) as $valuevoting_nomadperf_template + | # P&T Nomad cluster: 52 nodes, P2P by default - Plutus workload ($nomad_perf_plutus_base * $nomad_perf_dense * $p2p * $genesis_voltaire ) as $plutusvolt_nomadperf_template | + # P&T Nomad cluster: 52 nodes, P2P by default - plutus+voting workload + ($nomad_perf_plutus_base * $nomad_perf_dense * $p2p * $genesis_voting + ) as $plutusvoting_nomadperf_template + | # P&T Nomad cluster: 52 nodes, P2P by default - PlutusV3 BLST workload ($nomad_perf_plutusv3blst_base * $nomad_perf_dense * $p2p * $genesis_voltaire ) as $plutusv3blst_nomadperf_template @@ -1296,6 +1340,74 @@ def all_profile_variants: { name: "plutus-volt-nomadperf" } +## As "value" above with an extra voting workload + # Split creating 500k UTxO, create the transactions (build-raw) but no submit. + , $valuevoting_nomadperf_template * $dreps_large * + { name: "value-voting-utxo-volt-nomadperf" + , workloads: + [ $voting_workload * {parameters: + { outs_per_split_transaction: 193 + , submit_vote: false + } + }] + } + # One vote per voting tx version. 
+ , $valuevoting_nomadperf_template * $dreps_large * + { name: "value-voting-volt-nomadperf" + , workloads: + [ $voting_workload * {parameters: + { outs_per_split_transaction: 193 + , submit_vote: true + , votes_per_tx: 1 + } + }] + } + # Two votes per voting tx version. + , $valuevoting_nomadperf_template * $dreps_large * + { name: "value-voting-double-volt-nomadperf" + , workloads: + [ $voting_workload * {parameters: + { outs_per_split_transaction: 193 + , submit_vote: true + , votes_per_tx: 2 + } + }] + } + +## As "plutus" above with an extra voting workload + # Split creating 500k UTxO, create the transactions (build-raw) but no submit. + , $plutusvoting_nomadperf_template * $dreps_large * + { name: "plutus-voting-utxo-volt-nomadperf" + , workloads: + [ $voting_workload * {parameters: + { outs_per_split_transaction: 193 + , submit_vote: false + } + }] + } + # One vote per voting tx version. + , $plutusvoting_nomadperf_template * $dreps_large * + { name: "plutus-voting-volt-nomadperf" + , workloads: + [ $voting_workload * {parameters: + { outs_per_split_transaction: 193 + , submit_vote: true + , votes_per_tx: 1 + } + }] + } + # Two votes per voting tx version. + , $plutusvoting_nomadperf_template * $dreps_large * + { name: "plutus-voting-double-volt-nomadperf" + , workloads: + [ $voting_workload * {parameters: + { outs_per_split_transaction: 193 + , submit_vote: true + , votes_per_tx: 2 + } + }] + } + ## P&T Nomad cluster: 52 nodes, PlutusV3 BLST and Plutus SECP workloads , $plutusv3blst_nomadperf_template * { name: "plutusv3-blst-nomadperf" @@ -1530,6 +1642,18 @@ def all_profile_variants: { name: "chainsync-early-alonzo-p2p" } + ## development profile for voting workload: PV9, Conway costmodel, 1000 DReps injected + , $scenario_fixed_loaded * $doublet * $dataset_miniature * $for_3ep * $no_filtering * $voting_base * $double_plus_tps_saturation_plutus * $genesis_voting * $dreps_small * + { name: "development-voting" + , workloads: + [ $voting_workload * {parameters: + { outs_per_split_transaction: 193 + , submit_vote: true + , votes_per_tx: 2 + } + }] + } + ## Last, but not least, the profile used by "nix-shell -A devops": , { name: "devops" , scenario: "idle" diff --git a/nix/workbench/profile/prof2-pparams.jq b/nix/workbench/profile/prof2-pparams.jq index b068acd17b6..042ac6001f4 100644 --- a/nix/workbench/profile/prof2-pparams.jq +++ b/nix/workbench/profile/prof2-pparams.jq @@ -7,6 +7,7 @@ import "delta-blocksizes" as blocksizes; import "delta-v8-preview" as v8preview; import "delta-v9-preview" as v9preview; import "delta-v10-preview" as v10preview; +import "delta-voting" as voting; def filterMapPParams(flt; map): timeline::epochs @@ -32,6 +33,7 @@ def overlays: , "v9-preview": v9preview::delta , "v10-preview": v10preview::delta , "blocksize64k": blocksizes::delta_64kblocks + , "voting": voting::delta_voting }; def pParamsWithOverlays(epoch; overlay_names): diff --git a/nix/workbench/profile/profile.nix b/nix/workbench/profile/profile.nix index b23795282ea..839194d5024 100644 --- a/nix/workbench/profile/profile.nix +++ b/nix/workbench/profile/profile.nix @@ -54,6 +54,19 @@ let }) generator-service; + workloads-service = builtins.map (workload: rec { + name = workload.name; + start = rec { + value = '' + ${import ../workload/${name}.nix + {inherit pkgs profile nodeSpecs workload;} + } + ${workload.entrypoints.producers} + ''; + JSON = pkgs.writeScript "startup-${name}.sh" value; + }; + }) profile.workloads; + inherit (pkgs.callPackage ../service/tracer.nix @@ -70,12 +83,6 @@ let 
inherit backend profile nodeSpecs; }) healthcheck-service; - - inherit - (pkgs.callPackage - ../service/latency.nix - {}) - latency-service; }; materialise-profile = @@ -95,9 +102,9 @@ let }) node-services generator-service + workloads-service tracer-service healthcheck-service - latency-service ; in pkgs.runCommand "workbench-profile-${profileName}" @@ -125,6 +132,11 @@ let plutus-redeemer = plutus-redeemer.JSON; plutus-datum = plutus-datum.JSON; }; + workloadsService = __toJSON (builtins.map (workload: { + name = workload.name; + start = workload.start.JSON; + } + ) workloads-service); tracerService = with tracer-service; __toJSON @@ -138,19 +150,13 @@ let { name = "healthcheck"; start = start.JSON; }; - latencyService = - with healthcheck-service; - __toJSON - { name = "latency"; - start = start.JSON; - }; passAsFile = [ "nodeServices" "generatorService" + "workloadsService" "tracerService" "healthcheckService" - "latencyService" "topologyJson" "topologyDot" ]; @@ -163,9 +169,9 @@ let cp $topologyDotPath $out/topology.dot cp $nodeServicesPath $out/node-services.json cp $generatorServicePath $out/generator-service.json + cp $workloadsServicePath $out/workloads-service.json cp $tracerServicePath $out/tracer-service.json cp $healthcheckServicePath $out/healthcheck-service.json - cp $latencyServicePath $out/latency-service.json '' // ( @@ -180,7 +186,7 @@ let value = (__fromJSON (__readFile "${topologyFiles}/topology.json")); }; node-specs = {JSON = nodeSpecsJson; value = nodeSpecs;}; - inherit node-services generator-service tracer-service healthcheck-service latency-service; + inherit node-services generator-service tracer-service healthcheck-service workloads-service; } ) ; diff --git a/nix/workbench/run.sh b/nix/workbench/run.sh index d131f6b2f2e..3150c3c089a 100644 --- a/nix/workbench/run.sh +++ b/nix/workbench/run.sh @@ -569,6 +569,8 @@ EOF cp "$dir"/genesis/genesis-shelley.json "$dir"/genesis-shelley.json cp "$dir"/genesis/genesis.alonzo.json "$dir"/genesis.alonzo.json echo >&2 + ## Add global_basedir Voltaire Plutus guardrails script + cp "$global_basedir"/genesis/guardrails-script.plutus "$dir"/genesis/ ## 8. deploy genesis progress "run | genesis" "deploying.." 
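For reference, the termination predicate that the scenario.sh hunk below relies on can be exercised with plain jq. Here `jqtest` is assumed to be the workbench helper wrapping `jq --exit-status`, and the inline JSON mirrors the latency workload defined in prof1-variants.jq:

    $ jq '.workloads | any(.wait_pools)' \
        <<< '{"workloads":[{"name":"latency","wait_pools":false}]}'
    false

A latency-only profile therefore falls through to `wait-workloads-stopped`, while profiles whose workloads set `wait_pools: true` (the voting ones) keep the classic `wait-pools-stopped` termination.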
diff --git a/nix/workbench/scenario.sh b/nix/workbench/scenario.sh index 6ea13f2b992..3076da2c30e 100644 --- a/nix/workbench/scenario.sh +++ b/nix/workbench/scenario.sh @@ -71,28 +71,18 @@ case "$op" in ############ backend start-nodes "$dir" backend start-generator "$dir" + backend start-workloads "$dir" backend start-healthchecks "$dir" - scenario_setup_workload_termination "$dir" - # Trap end - ########## - - backend wait-pools-stopped "$dir" - scenario_cleanup_termination - - backend stop-all "$dir" - ;; - - latency ) - - scenario_setup_exit_trap "$dir" - # Trap start - ############ - backend start-nodes "$dir" - backend start-latencies "$dir" + if jqtest '.workloads == []' "$dir"/profile.json \ + || jqtest '.workloads | any(.wait_pools)' "$dir"/profile.json + then + scenario_setup_workload_termination "$dir" + backend wait-pools-stopped "$dir" + else + backend wait-workloads-stopped "$dir" + fi # Trap end ########## - - backend wait-latencies-stopped "$dir" scenario_cleanup_termination backend stop-all "$dir" diff --git a/nix/workbench/service/generator.nix b/nix/workbench/service/generator.nix index 3f347c2a643..bdbf52bbc90 100644 --- a/nix/workbench/service/generator.nix +++ b/nix/workbench/service/generator.nix @@ -129,6 +129,43 @@ let value = '' #!${pkgs.stdenv.shell} + ########################################### + # Extra workloads start ################### + ########################################### + ${builtins.concatStringsSep "" (builtins.map (workload: + let workload_name = workload.name; + entrypoint = workload.entrypoints.pre_generator; + node_name = if profile.composition.with_explorer + then "explorer" + else "node-0" + ; + in + '' + ########################################### + ########## workload start: ${workload_name} + ########################################### + ${if entrypoint != null + then + '' + ${import ../workload/${workload_name}.nix + {inherit pkgs profile nodeSpecs workload;} + } + (cd ../workloads/${workload_name} && ${entrypoint} ${node_name}) + '' + else + '' + '' + } + ########################################### + ########## workload end: ${workload_name} + ########################################### + '' + ) (profile.workloads or [])) + } + ############################################# + # Extra workloads end ####################### + ############################################# + ${service.script} ''; JSON = pkgs.writeScript "startup-generator.sh" value; diff --git a/nix/workbench/service/healthcheck.nix b/nix/workbench/service/healthcheck.nix index 54e396039c6..5cb09c8f9bd 100644 --- a/nix/workbench/service/healthcheck.nix +++ b/nix/workbench/service/healthcheck.nix @@ -53,12 +53,14 @@ let active_slots="$(${jq}/bin/jq --null-input -r \ "''${epoch_length} * ''${active_slots_coeff}" \ )" + with_explorer="$(${jq}/bin/jq .composition.with_explorer ../profile.json)" ${coreutils}/bin/echo "profile.json:" ${coreutils}/bin/echo "- network_magic: ''${network_magic}" ${coreutils}/bin/echo "- slot_duration: ''${slot_duration}" ${coreutils}/bin/echo "- epoch_length: ''${epoch_length}" ${coreutils}/bin/echo "- active_slots_coeff: ''${active_slots_coeff}" ${coreutils}/bin/echo "- active_slots: ''${active_slots}" + ${coreutils}/bin/echo "- with_explorer: ''${with_explorer}" # Fetch all defined node names (Including "explorer" nodes) ########################################################### @@ -122,9 +124,6 @@ let for node in ''${nodes[*]} do - # TODO: A couple of simple pings - # latency_topology_producers "''${node}" - # Cardano cluster connectivity 
(cardano-ping) connectivity_topology_producers "''${node}" @@ -202,8 +201,6 @@ let # Network functions ################################################## ###################################################################### - # TODO: latency_topology_producers "''${node}" - function connectivity_topology_producers() { local node=$1 msg "Connectivity using 'cardano-cli ping' of \"''${node}\"'s Producers" diff --git a/nix/workbench/service/latency.nix b/nix/workbench/service/latency.nix deleted file mode 100644 index c00d0256300..00000000000 --- a/nix/workbench/service/latency.nix +++ /dev/null @@ -1,144 +0,0 @@ -{ pkgs }: - -with pkgs.lib; - -let - latency-service = - (nodeSpecs: - let - bashInteractive = pkgs.bashInteractive; - coreutils = pkgs.coreutils; - iputils = pkgs.iputils; - jq = pkgs.jq; - in { - start = rec { - # Assumptions: - # - Command `date` and node's log use the same timezone! - value = '' - #!${bashInteractive}/bin/sh - - ###################################################################### - # Set script globals ################################################# - ###################################################################### - - # Strict runtime - ################ - - # e: Immediately exit if any command has a non-zero exit status - # u: Reference to non previously defined variables is an error - # pipefail: Any failed command in a pipeline is used as return code - set -euo pipefail - - # Fetch all defined node names (Including "explorer" nodes) - ########################################################### - - node_specs_nodes=$(${jq}/bin/jq --raw-output \ - "keys | join (\" \")" \ - ../node-specs.json \ - ) - node_specs_pools=$(${jq}/bin/jq \ - 'map(select(.kind == "pool")) | length' \ - ../node-specs.json \ - ) - ${coreutils}/bin/echo "node-specs.json:" - ${coreutils}/bin/echo "- Nodes: [''${node_specs_nodes[*]}]" - ${coreutils}/bin/echo "- Pools: ''${node_specs_pools}" - - # Look for locally deployed nodes and allocate latency - ###################################################### - - nodes=() - started_time=$(${coreutils}/bin/date +%s) - for node in ''${node_specs_nodes[*]} - do - if test -d "../''${node}" - then - nodes+=("''${node}") - # Create latency directory inside available node directories - ${coreutils}/bin/mkdir "../''${node}/latency" - # Save the starting time - ${coreutils}/bin/echo "''${started_time}" > "../''${node}/latency/start_time" - fi - done - ${coreutils}/bin/echo "Found deployed nodes:" - ${coreutils}/bin/echo "- Nodes: [''${nodes[*]}]" - - ###################################################################### - # Main ############################################################### - ###################################################################### - - # The main function, called at the end of the file/script. - function latency() { - - msg "Started!" - - for node in ''${nodes[*]} - do - - msg "Latency of \"''${node}\"'s Producers using 'ping'" - local topology_path="../''${node}/topology.json" - # Merge non-P2P and P2P in the same {addr:"ADDR",port:0} format. 
- local producers - producers=$(${jq}/bin/jq '.Producers//[] + ((.localRoots[0].accessPoints//[]) | map({addr:.address,port:.port}))' "''${topology_path}") - local keys - keys=$(echo "''${producers}" | ${jq}/bin/jq --raw-output 'keys | join (" ")') - for key in ''${keys[*]} - do - - local host - host=$(echo "''${producers}" | ${jq}/bin/jq --raw-output ".[''${key}].addr") - local port - port=$(echo "''${producers}" | ${jq}/bin/jq --raw-output ".[''${key}].port") - - # If the ping fails the whole script must fail! - - msg "Executing 'ping' to \"''${host}:''${port}\"" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O "''${host}" - - msg "Executing 'ping' to \"''${host}:''${port}\" with size 16" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 16 "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 16 "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 16 "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 16 "''${host}" - - msg "Executing 'ping' to \"''${host}:''${port}\" with size 65507" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 65507 "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 65507 "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 65507 "''${host}" - ${coreutils}/bin/sleep 60 - ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 65507 "''${host}" - - done - - done - } - - function msg { - # Outputs to stdout, unbuffered if not the message may be lost! - ${coreutils}/bin/stdbuf -o0 \ - ${bashInteractive}/bin/sh -c \ - "${coreutils}/bin/echo -e \"$(${coreutils}/bin/date --rfc-3339=seconds): $1\"" - } - - latency $@ - ''; - JSON = pkgs.writeScript "startup-latency.sh" value; - }; - }) - nodeSpecs; -in - { inherit latency-service; } diff --git a/nix/workbench/workload/latency.nix b/nix/workbench/workload/latency.nix new file mode 100644 index 00000000000..70be055d360 --- /dev/null +++ b/nix/workbench/workload/latency.nix @@ -0,0 +1,133 @@ +{ pkgs +, profile +, nodeSpecs +, workload +}: + +with pkgs.lib; + +let + bashInteractive = pkgs.bashInteractive; + coreutils = pkgs.coreutils; + iputils = pkgs.iputils; + jq = pkgs.jq; +# Assumptions: +# - Command `date` and node's log use the same timezone! 
+in '' +#!${bashInteractive}/bin/sh + +###################################################################### +# Set script globals ################################################# +###################################################################### + +# Strict runtime +################ + +# e: Immediately exit if any command has a non-zero exit status +# u: Reference to non previously defined variables is an error +# pipefail: Any failed command in a pipeline is used as return code +set -euo pipefail + +# Fetch all defined node names (Including "explorer" nodes) +########################################################### + +node_specs_nodes=$(${jq}/bin/jq --raw-output \ + "keys | join (\" \")" \ + ../../node-specs.json \ +) +node_specs_pools=$(${jq}/bin/jq \ + 'map(select(.kind == "pool")) | length' \ + ../../node-specs.json \ +) +${coreutils}/bin/echo "node-specs.json:" +${coreutils}/bin/echo "- Nodes: [''${node_specs_nodes[*]}]" +${coreutils}/bin/echo "- Pools: ''${node_specs_pools}" + +# Look for locally deployed nodes and allocate latency +###################################################### + +nodes=() +started_time=$(${coreutils}/bin/date +%s) +for node in ''${node_specs_nodes[*]} +do + if test -d "../../''${node}" + then + nodes+=("''${node}") + # Save the starting time + ${coreutils}/bin/echo "''${started_time}" > "../../workloads/latency/start_time_''${node}" + fi +done +${coreutils}/bin/echo "Found deployed nodes:" +${coreutils}/bin/echo "- Nodes: [''${nodes[*]}]" + +###################################################################### +# Main ############################################################### +###################################################################### + +# The main function, called at the end of the file/script. +function latency() { + + msg "Started!" + + for node in ''${nodes[*]} + do + + msg "Latency of \"''${node}\"'s Producers using 'ping'" + local topology_path="../../''${node}/topology.json" + # Merge non-P2P and P2P in the same {addr:"ADDR",port:0} format. + local producers + producers=$(${jq}/bin/jq '.Producers//[] + ((.localRoots[0].accessPoints//[]) | map({addr:.address,port:.port}))' "''${topology_path}") + local keys + keys=$(echo "''${producers}" | ${jq}/bin/jq --raw-output 'keys | join (" ")') + for key in ''${keys[*]} + do + + local host + host=$(echo "''${producers}" | ${jq}/bin/jq --raw-output ".[''${key}].addr") + local port + port=$(echo "''${producers}" | ${jq}/bin/jq --raw-output ".[''${key}].port") + + # If the ping fails the whole script must fail! 
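+      # Timing note (approximate, derived from the flags used below): each
+      # probe is a 60s sleep plus 60 pings at 1s intervals, i.e. ~2 minutes;
+      # with four probes for each of the three packet sizes (default, 16 and
+      # 65507 bytes), a single producer peer takes in the order of
+      # 4 * 2 * 3 = 24 minutes before the loop moves on to the next one.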
+
+      msg "Executing 'ping' to \"''${host}:''${port}\""
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O "''${host}"
+
+      msg "Executing 'ping' to \"''${host}:''${port}\" with size 16"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 16 "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 16 "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 16 "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 16 "''${host}"
+
+      msg "Executing 'ping' to \"''${host}:''${port}\" with size 65507"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 65507 "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 65507 "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 65507 "''${host}"
+      ${coreutils}/bin/sleep 60
+      ${iputils}/bin/ping -D -c 60 -i 1 -n -O -s 65507 "''${host}"
+
+    done
+
+  done
+}
+
+function msg {
+  # Output to stdout must be unbuffered; if it is not, the message may be lost!
+  ${coreutils}/bin/stdbuf -o0 \
+    ${bashInteractive}/bin/sh -c \
+      "${coreutils}/bin/echo -e \"$(${coreutils}/bin/date --rfc-3339=seconds): $1\""
+}
+''
diff --git a/nix/workbench/workload/voting.nix b/nix/workbench/workload/voting.nix
new file mode 100644
index 00000000000..e8e8baa4808
--- /dev/null
+++ b/nix/workbench/workload/voting.nix
@@ -0,0 +1,1417 @@
+{ pkgs
+, profile
+, nodeSpecs
+, workload
+}:
+
+let
+
+  # Packages
+  ##########
+
+  bashInteractive = pkgs.bashInteractive;
+  coreutils = pkgs.coreutils;
+  jq = pkgs.jq;
+  cardano-cli = pkgs.cardanoNodePackages.cardano-cli;
+
+  # Script params!
+  ################
+
+  testnet_magic = profile.genesis.network_magic;
+  gov_action_deposit =
+    if __hasAttr "conway" profile.genesis
+    then profile.genesis.conway.govActionDeposit
+    else throw "Conway genesis needed!"
+  ;
+  # Where to obtain the genesis funds from.
+  genesis_funds_vkey = "../../genesis/cache-entry/utxo-keys/utxo2.vkey";
+  genesis_funds_skey = "../../genesis/cache-entry/utxo-keys/utxo2.skey";
+  # Initial donation from genesis funds to make "valid" withdrawal proposals.
+  treasury_donation = 500000;
+
+  # Filter producers from "node-specs.json".
+  producers =
+    builtins.filter
+      (nodeSpec: nodeSpec.isProducer)
+      (builtins.attrValues nodeSpecs)
+  ;
+  # Construct an "array" with node producers to use in BASH `for` loops.
+  producers_bash_array =
+      "("
+    + (builtins.concatStringsSep
+        " "
+        (builtins.map
+          (x: "\"" + x.name + "\"")
+          producers
+        )
+      )
+    + ")"
+  ;
+
+  # How many constitutions to create with the genesis funds (explorer node).
+  constitutions_from_genesis = 1;
+  # To calculate how much of the funds to leave on nodes' addresses
+  # (Prop 0 / DRep 0) for the node to create withdrawal proposals
+  # (`--governance-action-deposit`).
+  withdrawal_proposals_per_producer = 1;
+
+  # UTxO preparation / creation / splitting phase.
+  # When splitting the genesis funds, we first move from `genesis_funds_vkey`
+  # to a "node address", called proposal-0 / DRep-0, for each producer; then to
+  # a "node-proposal address", called proposal-i / DRep-0, for each proposal;
+  # and the last split is from each of these node-proposal addresses to one
+  # "node-proposal-drep address", called node-i-prop-j-drep-k, for each DRep
+  # the node has assigned and will use to vote on each proposal.
+  #                                  Genesis
+  #                                     |
+  #              ----------------------------------------------
+  #              |                                             |
+  #            node-0                                        node-1
+  #              |                                             |
+  #      -----------------                             -----------------
+  #      |               |                             |               |
+  #  proposal-1      proposal-2                    proposal-1      proposal-2
+  #      |               |                             |               |
+  #  -----------     -----------                   -----------     -----------
+  #  |    |    |     |    |    |                   |    |    |     |    |    |
+  # drep1 drep2 drep3 drep1 drep2 drep3           drep1 drep2 drep3 drep1 drep2 drep3
+  #
+  # Helper with the total number of producers.
+  producers_count = builtins.length producers;
+  # Helper with the total number of proposals created.
+  proposals_count =
+      constitutions_from_genesis
+    + producers_count * withdrawal_proposals_per_producer
+  ;
+  dreps_per_producer = builtins.floor (profile.genesis.dreps / producers_count);
+  # Max number of '--tx-out' when splitting funds.
+  outs_per_split_transaction =
+    workload.parameters.outs_per_split_transaction or 100
+  ;
+
+  # Sleeps.
+  # Used when splitting funds to wait for the funds to arrive; as these initial
+  # funds are sent from a different process (not the genesis one), this works
+  # as a semaphore!
+  wait_any_utxo_tries = 30;
+  wait_any_utxo_sleep = 10; # 5 minutes in 10s steps.
+  # Used when splitting funds: waits for the expected UTxO to arrive at the
+  # change-address and re-submits the transaction if necessary!
+  wait_utxo_id_tries = 18;
+  wait_utxo_id_sleep = 10; # 3 minutes in 10s steps.
+  funds_submit_tries = 3;  # Submit the transaction up to this many times on timeout.
+  # Used when waiting for the recently created proposal.
+  wait_proposal_id_tries = 30;
+  wait_proposal_id_sleep = 10; # 5 minutes in 10s steps.
+  # Used to wait for all proposals to be available before we start voting.
+  # As nodes will end their splitting phases at different times, this parameter
+  # works as a formation lap before the race starts =).
+  # No tries, waits forever!
+  wait_proposals_count_sleep = 10;
+
+  # No decimals either, because of how the Bash script treats this number.
+  votes_per_tx = builtins.ceil (workload.parameters.votes_per_tx or 1);
+  # The most important one: used to calculate and achieve a predictable TPS.
+  # For reference:
+  ### A local 2-node cluster with no tx-generator, max TPS was:
+  ###### node-0: 5.356377670184436
+  ###### node-1: 5.384256440453143
+  ###### cluster: 10.443500635764893
+  ### A 52-node Nomad cluster with value+voting:
+  ###### One third of the nodes couldn't achieve 0.08 TPS (per node), this is
+  ###### ~4 TPS cluster wide, when the requested TPS was 0.096 per node.
+  desired_cluster_average_tps = 3.0; # Add decimals!!!!! (must stay a float)
+  # VOTES = TIME * TPS
+  desired_producer_tps = desired_cluster_average_tps / producers_count;
+  desired_producer_sleep = 1.0 / desired_producer_tps; # Add decimals!!!!!
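+  # Worked example (assuming the 52-producer Nomad profiles that use this
+  # workload; numbers rounded):
+  #   desired_producer_tps   = 3.0 / 52    ~= 0.0577 votes/s per producer
+  #   desired_producer_sleep = 1.0 / 0.0577 ~= 17.33 s between vote txs
+  # so all producers together still approximate the 3.0 TPS cluster target.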
+
+  # Script behavior
+  create_proposals = true;
+  build_vote = true; use_build_raw = true;
+  sign_vote = true;
+  submit_vote = workload.parameters.submit_vote or true;
+  wait_submit = workload.parameters.wait_submit or false;
+
+in ''
+
+# producers_count: ${toString producers_count}
+# proposals_count: ${toString proposals_count}
+# dreps_per_producer: ${toString dreps_per_producer}
+# votes_per_tx: ${toString votes_per_tx}
+# desired_cluster_average_tps: ${toString desired_cluster_average_tps}
+# desired_producer_tps: ${toString desired_producer_tps}
+# desired_producer_sleep: ${toString desired_producer_sleep}
+
+################################################################################
+# Given a node name ("node-0", "explorer", etc), returns the node's socket path.
+################################################################################
+function get_socket_path {
+
+  # Function arguments.
+  local node_str=$1 # node name / folder to find the socket.
+
+  local socket_path="../../''${node_str}/node.socket"
+  ${coreutils}/bin/echo "''${socket_path}"
+}
+
+################################################################################
+# Given a "tx.signed", returns a JSON object whose "tx_id" and "tx_ix"
+# properties hold the TxHash#TxIx of the FIRST occurrence of the provided
+# address in its "outputs", and whose "value" property holds the lovelace it
+# will contain.
+# For example: {"tx_id":"0000000000", "tx_ix": 0, "value":123456}.
+# If NO ADDRESS IS SUPPLIED as argument to this function, we assume the last
+# output is the change address (--change-address) and that you want to use that
+# one to calculate a future expected UTxO.
+################################################################################
+function calculate_next_utxo {
+
+  # Function arguments.
+  local tx_signed=$1
+  local addr=''${2:-null}
+
+  local tx_id
+  # Prints a transaction identifier.
+  tx_id="$( \
+    ${cardano-cli}/bin/cardano-cli conway transaction txid \
+      --tx-file "''${tx_signed}" \
+  )"
+  # View the transaction as JSON and get the index of the FIRST output
+  # containing "$addr".
+  ${cardano-cli}/bin/cardano-cli debug transaction view \
+    --output-json \
+    --tx-file "''${tx_signed}" \
+  | ${jq}/bin/jq --raw-output \
+      --argjson tx_id "\"''${tx_id}\"" \
+      --argjson addr "\"''${addr}\"" \
+      '
+        (
+          if $addr == null or $addr == "null"
+          then
+            (.outputs | length - 1)
+          else
+            (
+              .outputs
+              | map(.address == $addr)
+              | index(true)
+            )
+          end
+        ) as $tx_ix
+        | { "tx_id": $tx_id
+          , "tx_ix": $tx_ix
+          , "value": ( .outputs[$tx_ix].amount.lovelace )
+          }
+      '
+}
+
+################################################################################
+# Store the pre-calculated "cached" future UTxO of this address.
+# (Only useful if an address is always used from the same node/socket/path).
+################################################################################
+function store_address_utxo_expected {
+
+  # Function arguments.
+  local tx_signed=$1
+  local addr=$2
+
+  local utxo_file=./addr."''${addr}".json # Store in workload's directory!
+  calculate_next_utxo \
+    "''${tx_signed}" \
+    "''${addr}" \
+  > "''${utxo_file}"
+}
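+
+# For example (illustrative values only, not artifacts of a real run):
+#   store_address_utxo_expected "./tx.signed" "addr_test1vexample"
+# caches something like {"tx_id":"abc...", "tx_ix":0, "value":5000000} in
+# "./addr.addr_test1vexample.json", so a follow-up "--tx-in" can be assembled
+# with `get_address_utxo_expected_id` without querying the node.
+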
+################################################################################
+# Get the pre-calculated "cached" future UTxO TxHash#TxIx suitable to use as
+# part of a "--tx-in" argument. Returns an "empty" string if not available.
+# (Only useful if an address is always used from the same node/socket/path).
+################################################################################
+function get_address_utxo_expected_id {
+
+  # Function arguments.
+  local addr=$1
+
+  local utxo_file=./addr."''${addr}".json # Store in workload's directory!
+  if test -f "''${utxo_file}"
+  then
+    ${jq}/bin/jq --raw-output \
+      '( .tx_id + "#" + (.tx_ix | tostring) )' \
+      "''${utxo_file}"
+  fi
+}
+
+################################################################################
+# Get the pre-calculated "cached" future UTxO lovelace amount suitable to use
+# as part of a "--tx-in" argument. Returns an "empty" string if not available.
+# (This only works if an address is always used from the same node/socket/path).
+################################################################################
+function get_address_utxo_expected_value {
+
+  # Function arguments.
+  local addr=$1
+
+  local utxo_file=./addr."''${addr}".json # Store in workload's directory!
+  if test -f "''${utxo_file}"
+  then
+    ${jq}/bin/jq --raw-output '.value' "''${utxo_file}"
+  fi
+}
+
+################################################################################
+# Given a "tx.signed" filepath, returns "true" or "false".
+# Not to be run during the benchmarking phase: lots of queries!
+################################################################################
+function is_tx_in_mempool {
+
+  # Function arguments.
+  local node_str=$1 # node name / folder to find the socket to use.
+  local tx_signed=$2
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  local tx_id
+  tx_id="$( \
+    ${cardano-cli}/bin/cardano-cli conway transaction txid \
+      --tx-file "''${tx_signed}" \
+  )"
+  ${cardano-cli}/bin/cardano-cli conway query tx-mempool \
+    tx-exists "''${tx_id}" \
+    --testnet-magic ${toString testnet_magic} \
+    --socket-path "''${socket_path}" \
+  | ${jq}/bin/jq --raw-output \
+      .exists
+}
+
+################################################################################
+# Function to submit the funds-splitting tx and retry if needed.
+# Not to be run during the benchmarking phase: lots of queries!
+################################################################################
+function funds_submit_retry {
+
+  # Function arguments.
+  local node_str=$1  # node name / folder to find the socket to use.
+  local tx_signed=$2 # tx to send and maybe re-send.
+  local addr=$3      # Address to wait for (UTxO id must be cached).
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  local utxo_id
+  utxo_id="$(get_address_utxo_expected_id "''${addr}")"
+
+  local contains_addr="false"
+  local submit_tries=${toString funds_submit_tries}
+  while test ! "''${contains_addr}" = "true"
+  do
+    if test "''${submit_tries}" -le 0
+    then
+      # Time's up!
+      ${coreutils}/bin/echo "funds_submit_retry: Timeout waiting for: ''${addr} - ''${utxo_id}"
+      exit 1
+    else
+
+      # Some debugging.
+      ${coreutils}/bin/echo "funds_submit_retry: submit: ''${tx_signed} (''${submit_tries})"
+
+      # (Re)Submit the transaction, ignoring errors.
+      ${cardano-cli}/bin/cardano-cli conway transaction submit \
+        --testnet-magic ${toString testnet_magic} \
+        --socket-path "''${socket_path}" \
+        --tx-file "''${tx_signed}" \
+      || true
+      submit_tries="$((submit_tries - 1))"
+
+      # Wait for the transaction to NOT be in the mempool anymore.
+      local in_mempool="true"
+      while test ! "''${in_mempool}" = "false"
+      do
+        ${coreutils}/bin/sleep 1
+        in_mempool="$(is_tx_in_mempool "''${node_str}" "''${tx_signed}")"
+      done
+
+      # Some loops to see if the expected UTxO of this address appears.
+      local utxo_tries=${toString wait_utxo_id_tries}
+      while test ! "''${contains_addr}" = "true" && test "''${utxo_tries}" -gt 0
+      do
+        ${coreutils}/bin/sleep ${toString wait_utxo_id_sleep}
+        # Some debugging.
+        ${coreutils}/bin/echo "funds_submit_retry: wait_utxo_id: ''${utxo_id} (''${utxo_tries})"
+        contains_addr="$( \
+          ${cardano-cli}/bin/cardano-cli conway query utxo \
+            --testnet-magic ${toString testnet_magic} \
+            --socket-path "''${socket_path}" \
+            --address "''${addr}" \
+            --output-json \
+          | ${jq}/bin/jq --raw-output \
+              --argjson utxo_id "\"''${utxo_id}\"" \
+              'keys | any(. == $utxo_id) // false' \
+        )"
+        utxo_tries="$((utxo_tries - 1))"
+      done
+
+    fi
+  done
+
+}
+
+################################################################################
+# Evenly split the first UTxO of this key to the addresses in the array!
+# Does it in batches so we don't exceed the "maxTxSize" of 16384.
+# Stores the future UTxOs of all addresses in files for later reference.
+# Not to be run during the benchmarking phase: waits for funds between batches!
+################################################################################
+function funds_from_to {
+
+  # Function arguments.
+  local node_str=''${1};  shift # node name / folder to find the socket to use.
+  local utxo_vkey=''${1}; shift # In
+  local utxo_skey=''${1}; shift # In
+  local reminder=''${1};  shift # Funds to keep in the origin address.
+  local donation=''${1};  shift # To treasury.
+  local addrs_array=("$@")      # Outs
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  # Get the "in" address and its first UTxO only once we have the lock.
+  local funds_addr
+  funds_addr="$( \
+    ${cardano-cli}/bin/cardano-cli address build \
+      --testnet-magic ${toString testnet_magic} \
+      --payment-verification-key-file "''${utxo_vkey}" \
+  )"
+  # These three are only needed for the first batch and to calculate the funds
+  # per node.
+  local funds_json funds_tx funds_lovelace
+  funds_json="$( \
+    ${cardano-cli}/bin/cardano-cli conway query utxo \
+      --testnet-magic ${toString testnet_magic} \
+      --socket-path "''${socket_path}" \
+      --address "''${funds_addr}" \
+      --output-json \
+  )"
+  funds_tx="$( \
+      ${coreutils}/bin/echo "''${funds_json}" \
+    | ${jq}/bin/jq -r \
+        'keys[0]' \
+  )"
+  funds_lovelace="$( \
+      ${coreutils}/bin/echo "''${funds_json}" \
+    | ${jq}/bin/jq -r \
+        --arg keyName "''${funds_tx}" \
+        '.[$keyName].value.lovelace' \
+  )"
+
+  # Calculate how much lovelace for each output address.
+  local outs_count per_out_lovelace
+  outs_count="''${#addrs_array[@]}"
+  ### HACK: Fees! Always using 550000!!!
+  ### With 2 outputs: "Estimated transaction fee: 172233 Lovelace"
+  ### With 10 outputs: "Estimated transaction fee: 186665 Lovelace"
+  ### With 53 outputs: "Estimated transaction fee: 264281 Lovelace"
+  ### With 150 outputs: "Estimated transaction fee: 439357 Lovelace"
+  ### With 193 outputs: "Estimated transaction fee: 516929 Lovelace"
+  per_out_lovelace="$( \
+    ${jq}/bin/jq -r --null-input \
+      --argjson numerator "''${funds_lovelace}" \
+      --argjson denominator "''${outs_count}" \
+      --argjson reminder "''${reminder}" \
+      --argjson donation "''${donation}" \
+      '(
+         ( $numerator
+         - $reminder
+         - $donation
+         - ( 550000
+           * ( ($denominator / ${toString outs_per_split_transaction}) | ceil )
+           )
+         )
+         / $denominator
+         | round
+       )' \
+  )"
+
+  # Split the funds in batches (donations only happen in the first batch).
+  local i=0
+  local txOuts_args_array=() txOuts_addrs_array=()
+  local batch=${toString outs_per_split_transaction}
+  local tx_in tx_filename
+  local treasury_donation_args_array=()
+  for addr in "''${addrs_array[@]}"
+  do
+    i="$((i + 1))"
+    # Build the "--tx-out" arguments array of this batch.
+    txOuts_args_array+=("--tx-out")
+    txOuts_args_array+=("''${addr}+''${per_out_lovelace}")
+    txOuts_addrs_array+=("''${addr}")
+
+    # We send if this is the last addr in the for loop or the batch max was
+    # exceeded.
+    if test "$i" -ge "''${#addrs_array[@]}" || test "$i" -ge "$batch"
+    then
+      if test "$batch" -eq ${toString outs_per_split_transaction}
+      then
+        # First transaction.
+        # The input comes from the function arguments.
+        tx_in="''${funds_tx}"
+        # Treasury donation happens only once.
+        if ! test "''${donation}" = "0"
+        then
+          treasury_donation_args_array=("--treasury-donation" "''${donation}")
+        fi
+      else
+        # Not the first batch.
+        # The input comes from the last transaction submitted.
+        # No need to wait for it because the submission function does this!
+        tx_in="$(get_address_utxo_expected_id "''${funds_addr}")"
+        # Treasury donation happens only once.
+        treasury_donation_args_array=()
+      fi
+
+      # Some debugging!
+      ${coreutils}/bin/echo "funds_from_to: ''${utxo_vkey} (''${funds_addr}): --tx-in ''${tx_in}"
+
+      # Send this batch to each node!
+      # Build transaction.
+      tx_filename=./funds_from_to."''${funds_addr}"."''${i}"
+      ${cardano-cli}/bin/cardano-cli conway transaction build \
+        --testnet-magic ${toString testnet_magic} \
+        --socket-path "''${socket_path}" \
+        --tx-in "''${tx_in}" \
+        ''${txOuts_args_array[@]} \
+        ''${treasury_donation_args_array[@]} \
+        --change-address "''${funds_addr}" \
+        --out-file "''${tx_filename}.raw"
+      # Sign transaction.
+      ${cardano-cli}/bin/cardano-cli conway transaction sign \
+        --testnet-magic ${toString testnet_magic} \
+        --signing-key-file "''${utxo_skey}" \
+        --tx-body-file "''${tx_filename}.raw" \
+        --out-file "''${tx_filename}.signed"
+
+      # Store each out address's next UTxO.
+      for addr_cache in "''${txOuts_addrs_array[@]}"
+      do
+        store_address_utxo_expected \
+          "''${tx_filename}.signed" \
+          "''${addr_cache}"
+      done
+      # Without the change address we can't wait for the funds after submission
+      # or calculate the next input to use if an extra batch is needed!
+      store_address_utxo_expected \
+        "''${tx_filename}.signed" \
+        "''${funds_addr}"
+
+      # Submit the transaction and wait for settlement.
+      funds_submit_retry \
+        "''${node_str}" \
+        "''${tx_filename}.signed" \
+        "''${funds_addr}"
+
+      # Reset variables for the next batch iteration.
+      txOuts_args_array=() txOuts_addrs_array=()
+      batch="$((batch + ${toString outs_per_split_transaction}))"
+    fi
+  done
+}
+
+################################################################################
+# Waits until the UTxOs of this address are not empty (errors on timeout).
+# Not to be run during the benchmarking phase: lots of queries!
+################################################################################
+function wait_any_utxo {
+
+  # Function arguments.
+  local node_str=$1 # node name / folder to find the socket to use.
+  local addr=$2
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  local tries=${toString wait_any_utxo_tries}
+  local utxos_json="{}"
+  while test "''${utxos_json}" = "{}"
+  do
+    if test "''${tries}" -le 0
+    then
+      # Time's up!
+      ${coreutils}/bin/echo "wait_any_utxo: Timeout waiting for: ''${addr}"
+      exit 1
+    fi
+    utxos_json="$( \
+      ${cardano-cli}/bin/cardano-cli conway query utxo \
+        --testnet-magic ${toString testnet_magic} \
+        --socket-path "''${socket_path}" \
+        --address "''${addr}" \
+        --output-json \
+    )"
+    if ! test "''${tries}" = ${toString wait_any_utxo_tries}
+    then
+      ${coreutils}/bin/sleep ${toString wait_any_utxo_sleep}
+    fi
+    tries="$((tries - 1))"
+  done
+}
+
+################################################################################
+# Waits until a specific proposal appears or fails.
+# Not to be run during the benchmarking phase: lots of queries!
+################################################################################
+function wait_proposal_id {
+
+  # Function arguments.
+  local node_str=$1 # node name / folder to find the socket to use.
+  local tx_signed=$2
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  # Get proposal's "txId" from the "--tx-file".
+  local tx_id
+  tx_id="$( \
+    ${cardano-cli}/bin/cardano-cli conway transaction txid \
+      --tx-file "''${tx_signed}" \
+  )"
+
+  local contains_proposal="false"
+  local tries=${toString wait_proposal_id_tries}
+  while test "''${contains_proposal}" = "false"
+  do
+    if test "''${tries}" -le 0
+    then
+      # Time's up!
+      ${coreutils}/bin/echo "wait_proposal_id: Timeout waiting for: ''${tx_id}"
+      exit 1
+    else
+      # No "--output-json" needed.
+      contains_proposal="$( \
+        ${cardano-cli}/bin/cardano-cli conway query gov-state \
+          --testnet-magic ${toString testnet_magic} \
+          --socket-path "''${socket_path}" \
+        | ${jq}/bin/jq --raw-output \
+            --argjson tx_id "\"''${tx_id}\"" \
+            '.proposals | any(.actionId.txId == $tx_id) // false' \
+      )"
+      if ! test "''${tries}" = ${toString wait_proposal_id_tries}
+      then
+        ${coreutils}/bin/sleep ${toString wait_proposal_id_sleep}
+      fi
+      tries="$((tries - 1))"
+    fi
+  done
+}
+
+################################################################################
+# Waits until a specific number of proposals is visible.
+# Not to be run during the benchmarking phase: lots of queries!
+################################################################################
+function wait_proposals_count {
+
+  # Function arguments.
+  local node_str=$1 # node name / folder to find the socket to use.
+  local count=$2
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  local contains_proposals="false"
+  while test "''${contains_proposals}" = "false"
+  do
+    # No "--output-json" needed.
+    contains_proposals="$( \
+      ${cardano-cli}/bin/cardano-cli conway query gov-state \
+        --testnet-magic ${toString testnet_magic} \
+        --socket-path "''${socket_path}" \
+      | ${jq}/bin/jq --raw-output \
+          --argjson count "''${count}" \
+          '.proposals | length == $count // false' \
+    )"
+    ${coreutils}/bin/sleep ${toString wait_proposals_count_sleep}
+  done
+}
+
+################################################################################
+# Hack: Given a node "i", a proposal number and a DRep number, always creates
+# the same address keys.
+# Only supports up to 99 nodes, 9999 proposals and 999999 DReps by adding the
+# missing Hex chars.
+# Returns the file path without the extensions (the ".skey" or ".vkey" part).
+################################################################################
+function create_node_prop_drep_key_files {
+
+  # Function arguments.
+  local node_str=$1 # String for the key file name (not for the socket).
+  local node_i=$2   # This "i" is part of the node name ("node-i").
+  local prop_i=$3
+  local drep_i=$4
+
+  local filename=./"''${node_str}"-prop-"''${prop_i}"-drep-"''${drep_i}"
+  # Now with the extensions.
+  local skey="''${filename}".skey
+  local vkey="''${filename}".vkey
+
+  # Only create if not already there!
+  if ! test -f "''${vkey}"
+  then
+    ${jq}/bin/jq --null-input \
+      --argjson node_i "''${node_i}" \
+      --argjson prop_i "''${prop_i}" \
+      --argjson drep_i "''${drep_i}" \
+      '
+        {"type": "PaymentSigningKeyShelley_ed25519",
+         "description": "Payment Signing Key",
+         "cborHex": (
+             "5820b02868d722df021278c78be3b7363759b37f5852b8747b488bab"
+           + (if $node_i <= 9
+              then ("0" + ($node_i | tostring))
+              elif $node_i >= 10 and $node_i <= 99
+              then (      $node_i | tostring)
+              else (error ("Node ID above 99"))
+              end
+             )
+           + (if $prop_i <= 9
+              then ("000" + ($prop_i | tostring))
+              elif $prop_i >= 10 and $prop_i <= 99
+              then ("00"  + ($prop_i | tostring))
+              elif $prop_i >= 100 and $prop_i <= 999
+              then ("0"   + ($prop_i | tostring))
+              elif $prop_i >= 1000 and $prop_i <= 9999
+              then (        ($prop_i | tostring))
+              else (error ("Proposal ID above 9999"))
+              end
+             )
+           + (if $drep_i <= 9
+              then ("00000" + ($drep_i | tostring))
+              elif $drep_i >= 10 and $drep_i <= 99
+              then ("0000"  + ($drep_i | tostring))
+              elif $drep_i >= 100 and $drep_i <= 999
+              then ("000"   + ($drep_i | tostring))
+              elif $drep_i >= 1000 and $drep_i <= 9999
+              then ("00"    + ($drep_i | tostring))
+              elif $drep_i >= 10000 and $drep_i <= 99999
+              then ("0"     + ($drep_i | tostring))
+              elif $drep_i >= 100000 and $drep_i <= 999999
+              then (          ($drep_i | tostring))
+              else (error ("DRep ID above 999999"))
+              end
+             )
+         )
+        }
+      ' \
+    > "''${skey}"
+    ${cardano-cli}/bin/cardano-cli conway key verification-key \
+      --signing-key-file "''${skey}" \
+      --verification-key-file "''${vkey}"
+  fi
+  ${coreutils}/bin/echo "''${filename}"
+}
+
+################################################################################
+# Get the address of the node-proposal-drep combination!
+################################################################################
+function build_node_prop_drep_address {
+
+  # Function arguments.
+  local node_str=$1 # String for the key file name (not for the socket).
+  local node_i=$2   # This "i" is part of the node name ("node-i").
+  local prop_i=$3
+  local drep_i=$4
+
+  local filename addr
+  filename="$(create_node_prop_drep_key_files "''${node_str}" "''${node_i}" "''${prop_i}" "''${drep_i}")"
+  addr="''${filename}.addr"
+  # Only create if not already there!
+  if !
test -f "''${addr}" + then + local vkey="''${filename}".vkey + ${cardano-cli}/bin/cardano-cli address build \ + --testnet-magic ${toString testnet_magic} \ + --payment-verification-key-file "''${vkey}" \ + > "''${addr}" + fi + ${coreutils}/bin/cat "''${addr}" +} + +################################################################################ +# Evenly distribute the "utxo_*key" genesis funds to all producer nodes. +# To be called before `governance_funds_producer`. +# See above for a better explanation. +# Not to be run during the benchmarking phase: waits for funds to arrive! +################################################################################ +function governance_funds_genesis { + + # Function arguments. + local node_str=$1 # node name / folder to find the socket to use. + local utxo_vkey=$2 # tx-in + local utxo_skey=$3 # tx-in + local producers=${toString producers_bash_array} + + # Send funds to each node (using DRep ID 0 as a special logical separation). + ${coreutils}/bin/echo "governance_funds_genesis: Node(s) splitting phase! (''${node_str})" + + local action_deposit constitution_reminder + action_deposit="${toString gov_action_deposit}" + # HACK: Plus a fee estimate ("Estimated transaction fee: 172585 Lovelace"). + # Plus "Minimum UTxO threshold: 105986 Lovelace" + # Not more than one split transaction from here (no profile has more than 52 nodes, no batch). + constitution_reminder="$(( (action_deposit + 2000000) * ${toString constitutions_from_genesis} + 200000 ))" + + local producers_addrs_array=() + for producer_name in ''${producers[*]} + do + local producer_i + producer_i="$( \ + ${jq}/bin/jq --raw-output \ + --arg keyName "''${producer_name}" \ + '.[$keyName].i' \ + ../../node-specs.json \ + )" + local producer_addr + # Drep 0 is No DRep (funds for the node). + producer_addr="$(build_node_prop_drep_address "''${producer_name}" "''${producer_i}" 0 0)" + producers_addrs_array+=("''${producer_addr}") + ${coreutils}/bin/echo "governance_funds_genesis: Splitting to: ''${producer_name} - ''${producer_i} - 0 - (''${producer_addr})" + done + + # Split (no need to wait for the funds or re-submit, function takes care)! + funds_from_to \ + "''${node_str}" \ + "''${utxo_vkey}" "''${utxo_skey}" \ + "''${constitution_reminder}" \ + ${toString treasury_donation} \ + "''${producers_addrs_array[@]}" +} + +################################################################################ +# Evenly distribute, for each proposal, producer funds to all producer DReps. +# To be called after `governance_funds_genesis`. +# See above for a better explanation. +# Not to be run during the benchmarking phase: waits for funds to arrive! +################################################################################ +function governance_funds_producer { + + # Function arguments. + local node_str=$1 # node name / folder to find the socket to use. + local producer_name=$2 + + # Send funds to each node-proposal combination. + ${coreutils}/bin/echo "governance_funds_producer: Node(s)-Prop(s) splitting phase! 
(''${node_str})" + + local producer_i + producer_i="$( \ + ${jq}/bin/jq --raw-output \ + --arg keyName "''${producer_name}" \ + '.[$keyName].i' \ + ../../node-specs.json \ + )" + local producer_addr producer_vkey producer_skey + producer_addr="$(build_node_prop_drep_address "''${producer_name}" "''${producer_i}" 0 0)" + producer_vkey="$(create_node_prop_drep_key_files "''${producer_name}" "''${producer_i}" 0 0)".vkey + producer_skey="$(create_node_prop_drep_key_files "''${producer_name}" "''${producer_i}" 0 0)".skey + + # Wait for initial funds to arrive! + ${coreutils}/bin/echo "governance_funds_producer: Wait for funds: $(${coreutils}/bin/date --rfc-3339=seconds)" + wait_any_utxo "''${node_str}" "''${producer_addr}" + ${coreutils}/bin/echo "governance_funds_producer: Funds available: $(${coreutils}/bin/date --rfc-3339=seconds)" + + ############################### + # Producer -> Proposals split # + ############################### + + local action_deposit proposals_reminder + action_deposit="${toString gov_action_deposit}" + # HACK: Plus a fee estimate ("Estimated transaction fee: 374457 Lovelace"). + # Plus "Minimum UTxO threshold: 105986 Lovelace" + proposals_reminder="$(( (action_deposit + 4000000) * ${toString withdrawal_proposals_per_producer} + 200000 ))" + + local producer_prop_addr_array=() + for prop_i in {1..${toString proposals_count}} + do + local producer_prop_addr + producer_prop_addr="$(build_node_prop_drep_address "''${producer_name}" "''${producer_i}" "''${prop_i}" 0)" + producer_prop_addr_array+=("''${producer_prop_addr}") + ${coreutils}/bin/echo "governance_funds_producer: Splitting to: ''${producer_name} - ''${producer_i} - ''${prop_i} - ''${producer_prop_addr}" + done + + # Split (no need to wait for the funds or re-submit, function takes care)! + funds_from_to \ + "''${node_str}" \ + "''${producer_vkey}" "''${producer_skey}" \ + "''${proposals_reminder}" \ + 0 \ + "''${producer_prop_addr_array[@]}" + + ############################ + # Proposals -> DReps split # + ############################ + + local dreps_reminder + # HACK: Plus "Minimum UTxO threshold: 105986 Lovelace" + dreps_reminder="$(( 200000 ))" + + for prop_i in {1..${toString proposals_count}} + do + + local producer_prop_vkey producer_prop_skey + producer_prop_vkey="$(create_node_prop_drep_key_files "''${producer_name}" "''${producer_i}" "''${prop_i}" 0)".vkey + producer_prop_skey="$(create_node_prop_drep_key_files "''${producer_name}" "''${producer_i}" "''${prop_i}" 0)".skey + + local producer_dreps_addrs_array=() + local drep_step=0 + drep_step="$((producer_i * ${toString dreps_per_producer}))" + local actual_drep + for i in {1..${toString dreps_per_producer}} + do + local producer_drep_addr + actual_drep="$((drep_step + i))" + producer_drep_addr="$(build_node_prop_drep_address "''${producer_name}" "''${producer_i}" "''${prop_i}" "''${actual_drep}")" + producer_dreps_addrs_array+=("''${producer_drep_addr}") + ${coreutils}/bin/echo "governance_funds_producer: Splitting to: ''${producer_name} - ''${producer_i} - ''${prop_i} - ''${actual_drep} - ''${producer_drep_addr}" + done + + # Split (no need to wait for the funds or re-submit, function takes care)! + funds_from_to \ + "''${node_str}" \ + "''${producer_prop_vkey}" "''${producer_prop_skey}" \ + "''${dreps_reminder}" \ + 0 \ + "''${producer_dreps_addrs_array[@]}" + + done +} + +################################################################################ +# Create constitution proposal and wait for it in the `gov-state` query. 
+# Not to be run during the benchmarking phase: waits for the expected UTxO
+# (on timeout falls back to the first one available) and waits for the
+# proposal to appear in the `gov-state` query!
+################################################################################
+function governance_create_constitution {
+
+  # Function arguments.
+  local node_str=$1   # node name / folder to find the socket to use.
+  local utxo_vkey=$2
+  local utxo_skey=$3
+
+  ${coreutils}/bin/echo "governance_create_constitution: ''${node_str} - ''${utxo_vkey}"
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  local node_addr
+  node_addr="$( \
+    ${cardano-cli}/bin/cardano-cli address build \
+      --testnet-magic ${toString testnet_magic} \
+      --payment-verification-key-file "''${utxo_vkey}"
+  )"
+
+  # Deposit needed for this governance action.
+  local action_deposit
+  action_deposit="${toString gov_action_deposit}"
+  # Funds address.
+  # The input is calculated from the last transaction submitted.
+  # No waiting! But if the last submitted transaction fails (function
+  # `governance_funds_genesis` in the current workflow), everything else fails.
+  local funds_tx
+  funds_tx="$(get_address_utxo_expected_id "''${node_addr}")"
+
+  # Show current gov-state.
+  ${cardano-cli}/bin/cardano-cli conway query gov-state \
+    --testnet-magic ${toString testnet_magic} \
+    --socket-path "''${socket_path}" \
+  | ${jq}/bin/jq -r \
+    '.nextRatifyState.nextEnactState.prevGovActionIds'
+
+  # Create dummy constitution.
+  ${coreutils}/bin/echo "My Constitution: free mate and asado" \
+    > ./constitution.txt
+  # Calculate constitution hash.
+  ${cardano-cli}/bin/cardano-cli hash anchor-data \
+    --file-text ./constitution.txt \
+    --out-file ./constitution.hash
+  # Copy guardrails-script.
+  ${coreutils}/bin/cp \
+    ../../genesis/guardrails-script.plutus \
+    ./guardrails-script.plutus
+  # Calculate guardrails-script hash.
+  ${cardano-cli}/bin/cardano-cli hash script \
+    --script-file ./guardrails-script.plutus \
+    --out-file ./guardrails-script.hash
+
+  # Create action.
+  local tx_filename=./create-constitution
+  ${cardano-cli}/bin/cardano-cli conway governance action create-constitution \
+    --testnet \
+    --anchor-url "https://raw.githubusercontent.com/cardano-foundation/CIPs/master/CIP-0100/cip-0100.common.schema.json" \
+    --anchor-data-hash "9d99fbca260b2d77e6d3012204e1a8658f872637ae94cdb1d8a53f4369400aa9" \
+    --constitution-url "https://ipfs.io/ipfs/Qmdo2J5vkGKVu2ur43PuTrM7FdaeyfeFav8fhovT6C2tto" \
+    --constitution-hash "$(${coreutils}/bin/cat ./constitution.hash)" \
+    --constitution-script-hash "$(${coreutils}/bin/cat ./guardrails-script.hash)" \
+    --governance-action-deposit "''${action_deposit}" \
+    --deposit-return-stake-verification-key-file ../../genesis/cache-entry/stake-delegators/delegator0/staking.vkey \
+    --out-file "''${tx_filename}".action
+  # Build transaction.
+  ${cardano-cli}/bin/cardano-cli conway transaction build \
+    --testnet-magic ${toString testnet_magic} \
+    --socket-path "''${socket_path}" \
+    --tx-in "''${funds_tx}" \
+    --change-address "''${node_addr}" \
+    --proposal-file "''${tx_filename}".action \
+    --out-file "''${tx_filename}".raw \
+  > /dev/null
+  # Sign transaction.
+  ${cardano-cli}/bin/cardano-cli conway transaction sign \
+    --testnet-magic ${toString testnet_magic} \
+    --signing-key-file "''${utxo_skey}" \
+    --tx-body-file "''${tx_filename}".raw \
+    --out-file "''${tx_filename}".signed
+  # Submit transaction.
+  ${cardano-cli}/bin/cardano-cli conway transaction submit \
+    --testnet-magic ${toString testnet_magic} \
+    --socket-path "''${socket_path}" \
+    --tx-file "''${tx_filename}".signed \
+  > /dev/null
+
+  # Wait for the proposal without releasing the local socket.
+  wait_proposal_id "''${node_str}" "''${tx_filename}".signed >/dev/null
+
+  store_address_utxo_expected \
+    "''${tx_filename}.signed" \
+    "''${node_addr}"
+}
+
+################################################################################
+# Create withdrawal proposal and wait for it in the `gov-state` query.
+# Not to be run during the benchmarking phase: waits for the expected UTxO
+# (on timeout falls back to the first one available) and waits for the
+# proposal to appear in the `gov-state` query!
+################################################################################
+function governance_create_withdrawal {
+
+  # Function arguments.
+  local node_str=$1   # node name / folder to find the socket to use.
+  local node_i=$2     # This "i" is part of the node name ("node-i").
+  local drep_i=$3
+
+  ${coreutils}/bin/echo "governance_create_withdrawal: ''${node_str} - ''${drep_i}"
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  local node_drep_skey node_drep_addr
+  node_drep_skey="$(create_node_prop_drep_key_files "''${node_str}" "''${node_i}" 0 "''${drep_i}")".skey
+  node_drep_addr="$(build_node_prop_drep_address "''${node_str}" "''${node_i}" 0 "''${drep_i}")"
+
+  # Deposit needed for this governance action.
+  local action_deposit
+  action_deposit="${toString gov_action_deposit}"
+  # Funds address.
+  # The input is calculated from the last transaction submitted.
+  # No waiting! But if the last submitted transaction fails (function
+  # `governance_funds_producer` in the current workflow), everything else fails.
+  local funds_tx
+  funds_tx="$(get_address_utxo_expected_id "''${node_drep_addr}")"
+
+  local tx_filename=./create-withdrawal."''${node_str}"."''${drep_i}"
+  # Create action.
+  ${cardano-cli}/bin/cardano-cli conway governance action create-treasury-withdrawal \
+    --testnet \
+    --anchor-url "https://raw.githubusercontent.com/cardano-foundation/CIPs/master/CIP-0108/examples/treasury-withdrawal.jsonld" \
+    --anchor-data-hash "311b148ca792007a3b1fee75a8698165911e306c3bc2afef6cf0145ecc7d03d4" \
+    --governance-action-deposit "''${action_deposit}" \
+    --transfer 50 \
+    --deposit-return-stake-verification-key-file ../../genesis/cache-entry/stake-delegators/"delegator''${node_i}"/staking.vkey \
+    --funds-receiving-stake-verification-key-file ../../genesis/cache-entry/stake-delegators/"delegator''${node_i}"/staking.vkey \
+    --out-file "''${tx_filename}".action
+  # Build transaction.
+  ${cardano-cli}/bin/cardano-cli conway transaction build \
+    --testnet-magic ${toString testnet_magic} \
+    --socket-path "''${socket_path}" \
+    --tx-in "''${funds_tx}" \
+    --change-address "''${node_drep_addr}" \
+    --proposal-file "''${tx_filename}".action \
+    --out-file "''${tx_filename}".raw \
+  > /dev/null
+  # Sign transaction.
+  ${cardano-cli}/bin/cardano-cli conway transaction sign \
+    --testnet-magic ${toString testnet_magic} \
+    --signing-key-file "''${node_drep_skey}" \
+    --tx-body-file "''${tx_filename}".raw \
+    --out-file "''${tx_filename}".signed
+  # Submit transaction.
+  ${cardano-cli}/bin/cardano-cli conway transaction submit \
+    --testnet-magic ${toString testnet_magic} \
+    --socket-path "''${socket_path}" \
+    --tx-file "''${tx_filename}".signed \
+  > /dev/null
+
+  # Wait for the proposal without releasing the local socket.
+  wait_proposal_id "''${node_str}" "''${tx_filename}".signed >/dev/null
+
+  store_address_utxo_expected \
+    "''${tx_filename}.signed" \
+    "''${node_drep_addr}"
+}
+
+################################################################################
+# Sleeps between vote submissions to achieve the desired TPS.
+################################################################################
+
+function vote_tps_throttle() {
+  # Function arguments.
+  local node_str=$1   # node name / folder to find the socket to use.
+  local txs_count=$2  # Actual number of total txs already submitted.
+
+  local filepath_first=./first_vote_time
+  local filepath_last=./last_vote_time
+  if ! test -f "''${filepath_first}"
+  then
+    local start_time
+    start_time="$(${coreutils}/bin/date +"%s.%3N")" # With milliseconds / decimals.
+    ${coreutils}/bin/echo "''${start_time}" > "''${filepath_first}"
+    ${coreutils}/bin/echo "''${start_time}" > "''${filepath_last}"
+  else
+    local start_time last_time
+    start_time="$(${coreutils}/bin/cat "''${filepath_first}")"
+    last_time="$(${coreutils}/bin/cat "''${filepath_last}")"
+    local current_time
+    current_time="$(${coreutils}/bin/date +"%s.%3N")" # With milliseconds / decimals.
+    ${coreutils}/bin/echo "''${current_time}" > "''${filepath_last}"
+    # If the cumulative TPS is at or above the target, sleep until the
+    # wall-clock time at which the next tx keeps the rate at the desired TPS:
+    # start_time + (txs_count + 1) / desired_tps - current_time.
+    local sleep_arg_number
+    sleep_arg_number="$( \
+      ${jq}/bin/jq --null-input --raw-output \
+        --argjson start_time   "''${start_time}" \
+        --argjson last_time    "''${last_time}" \
+        --argjson current_time "''${current_time}" \
+        --argjson txs_count    "''${txs_count}" \
+        '
+          if $txs_count < 3
+          then 0
+          else (
+            ($current_time - $start_time) as $cluster_seconds
+            | ($txs_count / $cluster_seconds) as $current_tps
+            | if $current_tps < ${toString desired_producer_tps}
+              then 0
+              else (
+                (($txs_count + 1) / ${toString desired_producer_tps}) as $next_cluster_seconds
+                | ($start_time + $next_cluster_seconds - $current_time)
+              )
+              end
+          )
+          end
+        ' \
+    )"
+    ${coreutils}/bin/echo "vote_tps_throttle: ''${node_str}: ''${sleep_arg_number}"
+    if ! test "''${sleep_arg_number}" = "0"
+    then
+      ${coreutils}/bin/sleep "''${sleep_arg_number}"
+    fi
+  fi
+}
+
+################################################################################
+# Benchmarking phase function!
+# Fetch all proposals and call `governance_vote_proposal` with each proposal ID
+# so the node votes that proposal with all the DReps it controls.
+################################################################################
+function governance_vote_all {
+
+  # Function arguments.
+  local node_str=$1   # node name / folder to find the socket to use.
+  local node_i=$2     # This "i" is part of the node name ("node-i").
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  # Don't query the node while voting!
+  local txIdsJSON proposal_tx_ids_array
+  txIdsJSON="$( \
+    ${cardano-cli}/bin/cardano-cli conway query gov-state \
+      --testnet-magic ${toString testnet_magic} \
+      --socket-path "''${socket_path}" \
+    | ${jq}/bin/jq '.proposals | map(.actionId.txId)' \
+  )"
+  proposal_tx_ids_array=$(${coreutils}/bin/echo "''${txIdsJSON}" | ${jq}/bin/jq --raw-output 'join(" ")')
+
+  # Keep a txs counter for TPS calculation.
+  local txs_count=0 # Actual number of total txs already submitted.
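+  # Each producer controls a disjoint slice of the global DRep set:
+  #   DReps (node_i * dreps_per_producer + 1) .. ((node_i + 1) * dreps_per_producer).
+  # Worked example (hypothetical numbers): with dreps_per_producer = 500 and
+  # node_i = 2, drep_step below is 1000, so this node votes with DReps
+  # 1001..1500.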
+  # To calculate the DReps assigned to this node.
+  local drep_step
+  drep_step=$(( node_i * ${toString dreps_per_producer} ))
+
+  # Cycle proposals.
+  local prop_i=0
+  for proposal_tx_id in ''${proposal_tx_ids_array[*]}
+  do
+    # Proposal key to use.
+    prop_i=$(( prop_i + 1 ))
+    # DReps to use (a voting transaction can carry more than 1 vote).
+    local dreps_array=()
+    for i in {1..${toString dreps_per_producer}}
+    do
+      local actual_drep
+      actual_drep="$((drep_step + i))"
+      dreps_array+=("''${actual_drep}")
+      if test "''${#dreps_array[@]}" -ge ${toString votes_per_tx}
+      then
+        vote_tps_throttle "''${node_str}" "''${txs_count}"
+        governance_vote_proposal \
+          "''${node_str}" \
+          "''${node_i}" \
+          "''${prop_i}" \
+          "''${proposal_tx_id}" \
+          "''${dreps_array[@]}"
+        txs_count=$(( txs_count + 1 ))
+        dreps_array=()
+      fi
+    done
+    local proposal_flag="./proposal.''${proposal_tx_id}.voted"
+    ${coreutils}/bin/touch "''${proposal_flag}"
+  done
+}
+
+################################################################################
+# Benchmarking phase function!
+# The node votes the proposal with all the DReps it controls.
+################################################################################
+function governance_vote_proposal {
+
+  # Function arguments.
+  local node_str=''${1}; shift        # node name / folder to find the socket to use.
+  local node_i=''${1}; shift          # This "i" is part of the node name ("node-i").
+  local prop_i=''${1}; shift
+  local proposal_tx_id=''${1}; shift  # Proposal key/address to use.
+  local dreps_array=("$@")            # DReps to use in this voting transaction.
+
+  # Only defined in functions that use it.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+
+  ${coreutils}/bin/echo "governance_vote_proposal: ''${proposal_tx_id} - ''${node_str} - ''${dreps_array[0]}-''${dreps_array[-1]}"
+
+  local funds_addr funds_tx funds_value
+  local vote_file_params_array=()
+  local signing_key_file_params_array=()
+  for drep_i in ''${dreps_array[*]}
+  do
+    local node_drep_skey node_drep_addr
+    node_drep_skey="$(create_node_prop_drep_key_files "''${node_str}" "''${node_i}" "''${prop_i}" "''${drep_i}")".skey
+    node_drep_addr="$(build_node_prop_drep_address "''${node_str}" "''${node_i}" "''${prop_i}" "''${drep_i}")"
+    # UTxOs are created for 1 vote per transaction so all runs have the same
+    # number of UTxOs. We grab the funds from the first address/UTxO.
+    if test -z "''${funds_tx-}"
+    then
+      # Funds address.
+      funds_addr="''${node_drep_addr}"
+      # The input is calculated from the last transaction submitted.
+      # No waiting! But if the last submitted transaction fails (function
+      # `governance_funds_producer` or `governance_vote_proposal` in the
+      # current workflow), everything else fails.
+      funds_tx="$(get_address_utxo_expected_id "''${node_drep_addr}")"
+      # A next UTxO should already be cached by `funds_from_to` from when the
+      # funds were split; we deliberately don't check the response, to be
+      # really sure the node is never queried here.
+      funds_value="$(get_address_utxo_expected_value "''${node_drep_addr}")"
+      # The key of the funding address must also sign this transaction.
+      signing_key_file_params_array+=("--signing-key-file ''${node_drep_skey}")
+    fi
+    local vote_filename=./proposal."''${proposal_tx_id}"."''${drep_i}"
+    ${cardano-cli}/bin/cardano-cli conway governance vote create \
+      --yes \
+      --governance-action-tx-id "''${proposal_tx_id}" \
+      --governance-action-index "0" \
+      --drep-verification-key-file ../../genesis/cache-entry/drep-keys/drep"''${drep_i}"/drep.vkey \
+      --out-file "''${vote_filename}".action
+    vote_file_params_array+=("--vote-file ''${vote_filename}.action")
+    signing_key_file_params_array+=("--signing-key-file ../../genesis/cache-entry/drep-keys/drep''${drep_i}/drep.skey")
+  done
+
+  local tx_filename=./proposal."''${proposal_tx_id}"."''${dreps_array[0]}"-"''${dreps_array[-1]}"
+  # Build the transaction.
+  ${if build_vote
+    then (
+      if use_build_raw
+      then ''
+        # Fixed fee estimate; the change output keeps the rest of the funds.
+        local change
+        change=$(( $funds_value - 250000 ))
+        ${cardano-cli}/bin/cardano-cli conway transaction build-raw \
+          --tx-in "''${funds_tx}" \
+          --fee 250000 \
+          --tx-out "''${funds_addr}"+"''${change}" \
+          ''${vote_file_params_array[@]} \
+          --out-file "''${tx_filename}".raw
+      ''
+      else ''
+        ${cardano-cli}/bin/cardano-cli conway transaction build \
+          --testnet-magic ${toString testnet_magic} \
+          --socket-path "''${socket_path}" \
+          --tx-in "''${funds_tx}" \
+          --change-address "''${funds_addr}" \
+          --witness-override "$(( 1 + ''${#dreps_array[@]} ))" \
+          ''${vote_file_params_array[@]} \
+          --out-file "''${tx_filename}".raw \
+        > /dev/null
+      ''
+    )
+    else ''
+      ${coreutils}/bin/echo "transaction build off!"
+    ''
+  }
+  ${if build_vote && sign_vote
+    then ''
+      # Sign it with the DRep keys (plus the funding key):
+      ${cardano-cli}/bin/cardano-cli conway transaction sign \
+        --testnet-magic ${toString testnet_magic} \
+        ''${signing_key_file_params_array[@]} \
+        --tx-body-file "''${tx_filename}".raw \
+        --out-file "''${tx_filename}".signed
+    ''
+    else ''
+      ${coreutils}/bin/echo "transaction sign off!"
+    ''
+  }
+  ${if build_vote && sign_vote && submit_vote
+    then ''
+      # Submit the transaction:
+      ${cardano-cli}/bin/cardano-cli conway transaction submit \
+        --testnet-magic ${toString testnet_magic} \
+        --socket-path "''${socket_path}" \
+        --tx-file "''${tx_filename}".signed \
+      >/dev/null ${if wait_submit then "&" else ""}
+    ''
+    else ''
+      ${coreutils}/bin/echo "transaction submit off!"
+    ''
+  }
+  ${coreutils}/bin/touch "''${tx_filename}".voted
+
+  # No `store_address_utxo_expected`, all UTxOs are created before voting!
+
+}
+
+################################################################################
+# Entrypoints.
+################################################################################
+
+function workflow_generator_log_proposals {
+  # Function arguments.
+  local node_str=$1  # node name / folder to find the socket to use.
+  local socket_path
+  socket_path="$(get_socket_path "''${node_str}")"
+  while true
+  do
+    ${cardano-cli}/bin/cardano-cli conway query gov-state \
+      --testnet-magic ${toString testnet_magic} \
+      --socket-path "''${socket_path}" \
+    | ${jq}/bin/jq '.proposals' \
+    > ./proposals."''$(${coreutils}/bin/date +"%Y-%m-%d-%H-%M-%S-%3N")".json
+    ${coreutils}/bin/sleep 60
+  done
+}
+
+function workflow_generator {
+  # Function arguments.
+  local node_str=$1  # node name / folder to find the socket to use.
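+  # Phases of the generator workflow (sketch of what follows below):
+  #   1. Split the genesis "utxo_*key" funds to every producer address.
+  #   2. Optionally create the constitution proposal(s).
+  #   3. Wait until all expected proposals are visible in `gov-state`.
+  #   4. Leave a background job that periodically logs the proposals.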
+ + #- Splitting 0 --------------------------------------------------------------# + ${coreutils}/bin/echo "governance_funds_genesis: Start: $(${coreutils}/bin/date --rfc-3339=seconds)" + governance_funds_genesis \ + "''${node_str}" \ + ${toString genesis_funds_vkey} \ + ${toString genesis_funds_skey} + ${coreutils}/bin/echo "governance_funds_genesis: End: $(${coreutils}/bin/date --rfc-3339=seconds)" + #- Preparing ----------------------------------------------------------------# + ${if create_proposals + then '' + ${coreutils}/bin/echo "governance_create_constitution: Start: $(${coreutils}/bin/date --rfc-3339=seconds)" + governance_create_constitution \ + "''${node_str}" \ + ${toString genesis_funds_vkey} \ + ${toString genesis_funds_skey} + ${coreutils}/bin/echo "governance_create_constitution: End: $(${coreutils}/bin/date --rfc-3339=seconds)" + '' + else '' + ${coreutils}/bin/echo "No governance_create_constitution today!" + '' + } + #- Waiting ------------------------------------------------------------------# + ${coreutils}/bin/echo "wait_proposals_count: Start: $(${coreutils}/bin/date --rfc-3339=seconds)" + wait_proposals_count "''${node_str}" ${toString proposals_count} + ${coreutils}/bin/echo "wait_proposals_count: End: $(${coreutils}/bin/date --rfc-3339=seconds)" + #- Log ----------------------------------------------------------------------# + # Keep a job that periodically stores the proposals from the gov-state. + workflow_generator_log_proposals "''${node_str}" & + +} + +function workflow_producer { + # Run the producer workflow for each deployed producer. + local producers=${toString producers_bash_array} + for producer_name in ''${producers[*]} + do + if test -d "../../''${producer_name}" + then + workflow_producer_deployed "''${producer_name}" + fi + done +} + +function workflow_producer_deployed { + # Function arguments. + local node_str=$1 # node name / folder to find the socket to use. 
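+  # Phases of the per-producer workflow (sketch of what follows below):
+  #   1. Split this producer's funds to its per-proposal / per-DRep addresses.
+  #   2. Optionally create the withdrawal proposal(s).
+  #   3. Wait until all expected proposals are visible in `gov-state`.
+  #   4. Vote every proposal with every DRep this node controls, snapshotting
+  #      `gov-state` before and after the voting phase.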
+
+  local producer_i
+  producer_i="$( \
+    ${jq}/bin/jq --raw-output \
+      --arg keyName "''${node_str}" \
+      '.[$keyName].i' \
+      ../../node-specs.json \
+  )"
+
+  #- Splitting 1 --------------------------------------------------------------#
+  ${coreutils}/bin/echo "governance_funds_producer: Start: $(${coreutils}/bin/date --rfc-3339=seconds)"
+  governance_funds_producer "''${node_str}" "''${node_str}"
+  ${coreutils}/bin/echo "governance_funds_producer: End: $(${coreutils}/bin/date --rfc-3339=seconds)"
+  #- Preparing ----------------------------------------------------------------#
+  ${if create_proposals
+    then ''
+      ${coreutils}/bin/echo "governance_create_withdrawal(s): Start: $(${coreutils}/bin/date --rfc-3339=seconds)"
+      for i in {1..${toString withdrawal_proposals_per_producer}}
+      do
+        # drep_i=0: every withdrawal is funded from the producer's base (DRep 0) address.
+        governance_create_withdrawal "''${node_str}" "''${producer_i}" 0
+      done
+      ${coreutils}/bin/echo "governance_create_withdrawal(s): End: $(${coreutils}/bin/date --rfc-3339=seconds)"
+      #- Waiting ------------------------------------------------------------------#
+      ${coreutils}/bin/echo "wait_proposals_count: Start: $(${coreutils}/bin/date --rfc-3339=seconds)"
+      wait_proposals_count "''${node_str}" ${toString proposals_count}
+      ${coreutils}/bin/echo "wait_proposals_count: End: $(${coreutils}/bin/date --rfc-3339=seconds)"
+      #- Benchmarking -------------------------------------------------------------#
+      local socket_path
+      socket_path="$(get_socket_path "''${node_str}")"
+      # Store the gov-state as it is before voting.
+      ${cardano-cli}/bin/cardano-cli conway query gov-state \
+        --testnet-magic ${toString testnet_magic} \
+        --socket-path "''${socket_path}" \
+      > "./gov-state.start.json"
+      ${coreutils}/bin/echo "governance_vote_all: Start: $(${coreutils}/bin/date --rfc-3339=seconds)"
+      governance_vote_all "''${node_str}" "''${producer_i}"
+      ${coreutils}/bin/echo "governance_vote_all: End: $(${coreutils}/bin/date --rfc-3339=seconds)"
+      socket_path="$(get_socket_path "''${node_str}")"
+      # Store the gov-state as it is after voting.
+      ${cardano-cli}/bin/cardano-cli conway query gov-state \
+        --testnet-magic ${toString testnet_magic} \
+        --socket-path "''${socket_path}" \
+      > "./gov-state.end.json"
+      #----------------------------------------------------------------------------#
+    ''
+    else ''
+      ${coreutils}/bin/echo "No governance_create_withdrawal(s) today!"
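+      # Note: with `create_proposals` disabled, the waiting and voting phases
+      # above are skipped too; this branch only logs that nothing was done.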
+ '' + } + +} + +'' diff --git a/wb_profiles.mk b/wb_profiles.mk index b4c52a9f432..7f7ab2b2dcb 100644 --- a/wb_profiles.mk +++ b/wb_profiles.mk @@ -1,14 +1,14 @@ PROFILES_EMPTY := fast-solo fast fast-p2p fast-oldtracing fast-notracer fast-plutus ci-test ci-test-rtview ci-test-notracer ci-test-p2p ci-test-plutus ci-test-hydra ci-test-hydra trace-bench trace-bench-rtview trace-bench-oldtracing trace-bench-notracer trace-full trace-full-rtview default default-p2p oldtracing plutus plutus-secp-ecdsa plutus-secp-schnorr epoch-transition -PROFILES_MINIATURE := ci-bench ci-bench-lmdb ci-bench-rtview ci-bench-p2p ci-bench-notracer ci-bench-drep ci-bench-plutus ci-bench-plutus24 ci-bench-plutus-secp-ecdsa ci-bench-plutus-secp-schnorr ci-bench-plutusv3-blst ci-bench-plutusv3-ripemd 10 10-p2p 10-notracer 10-plutus 6-dense 6-dense-rtsprof 6-dense-1h 6-dense-1h-rtsprof 6-dense-4h 6-dense-4h-rtsprof +PROFILES_MINIATURE := ci-bench ci-bench-lmdb ci-bench-rtview ci-bench-p2p ci-bench-notracer ci-bench-drep ci-bench-plutus ci-bench-plutus24 ci-bench-plutus-secp-ecdsa ci-bench-plutus-secp-schnorr ci-bench-plutusv3-blst ci-bench-plutusv3-ripemd 10 10-p2p 10-notracer 10-plutus 6-dense 6-dense-rtsprof 6-dense-1h 6-dense-1h-rtsprof 6-dense-4h 6-dense-4h-rtsprof development-voting PROFILES_FORGE_STRESS := forge-stress-solo-xs forge-stress-solo forge-stress-plutus-solo forge-stress-pre-solo-xs forge-stress-pre-solo forge-stress-pre-solo-xl forge-stress forge-stress-notracer forge-stress-p2p forge-stress-plutus forge-stress-pre forge-stress-pre-rtsA4m forge-stress-pre-rtsA64m forge-stress-pre-rtsN3 forge-stress-pre-rtsA4mN3 forge-stress-pre-rtsA64mN3 forge-stress-pre-rtsxn forge-stress-pre-notracer forge-stress-pre-plutus forge-stress-large PROFILES_PLUTUSCALL := plutuscall-loop-plain plutuscall-loop-half plutuscall-loop-double plutuscall-secp-ecdsa-plain plutuscall-secp-ecdsa-half plutuscall-secp-ecdsa-double plutuscall-secp-schnorr-plain plutuscall-secp-schnorr-half plutuscall-secp-schnorr-double plutuscall-volt-loop plutuscall-volt-blst plutuscall-volt-ripemd PROFILES_MODEL := model-secp-ecdsa-double model-secp-ecdsa-half model-secp-ecdsa-plain model-value model-value-test PROFILES_K3 := k3-3ep-5kTx-10000kU-1300kD-64kbs-fixed-loaded k3-3ep-9kTx-10000kU-1300kD-64kbs-5tps-fixed-loaded k3-3ep-18kTx-10000kU-1300kD-64kbs-10tps-fixed-loaded k3-3ep-22kTx-10000kU-1300kD-64kbs-fixed-loaded -PROFILES_SCENARIOS := chainsync-early-byron chainsync-early-byron-notracer chainsync-early-byron-oldtracing chainsync-early-alonzo chainsync-early-alonzo-notracer chainsync-early-alonzo-p2p chainsync-early-alonzo-oldtracing devops idle latency-nomadperf latency-nomadperfssd tracer-only +PROFILES_SCENARIOS := chainsync-early-byron chainsync-early-byron-notracer chainsync-early-byron-oldtracing chainsync-early-alonzo chainsync-early-alonzo-notracer chainsync-early-alonzo-p2p chainsync-early-alonzo-oldtracing devops idle tracer-only PROFILES_LEGACY := ci-test-dense10 dish dish-10M dish-plutus dish-10M-plutus PROFILES_SCALING := faststartup-24M -PROFILES_NOMAD_PERF := value-nomadperf value-nomadperf-nop2p value-drep1k-nomadperf value-drep2k-nomadperf value-drep10k-nomadperf value-drep100k-nomadperf value-oldtracing-nomadperf value-oldtracing-nomadperf-nop2p value-volt-nomadperf plutus-nomadperf plutus-nomadperf-nop2p plutus-drep1k-nomadperf plutus-drep2k-nomadperf plutus-drep10k-nomadperf plutus-drep100k-nomadperf plutus24-nomadperf plutus-secp-ecdsa-nomadperf plutus-secp-schnorr-nomadperf plutusv3-blst-nomadperf 
plutusv3-blst-double-nomadperf plutusv3-blst-half-nomadperf plutus-volt-nomadperf fast-nomadperf fast-nomadperf-nop2p ci-test-nomadperf ci-test-nomadperf-nop2p ci-test-oldtracing-nomadperf default-nomadperf-nop2p default-nomadperf oldtracing-nomadperf oldtracing-nomadperf-nop2p ci-bench-nomadperf ci-bench-nomadperf-nop2p ci-bench-oldtracing-nomadperf -PROFILES_NOMAD_PERFSSD := utxoscale-solo-12M16G-nomadperfssd utxoscale-solo-12M64G-nomadperfssd utxoscale-solo-24M64G-nomadperfssd fast-nomadperfssd value-nomadperfssd +PROFILES_NOMAD_PERF := value-nomadperf value-nomadperf-nop2p value-drep1k-nomadperf value-drep2k-nomadperf value-drep10k-nomadperf value-drep100k-nomadperf value-oldtracing-nomadperf value-oldtracing-nomadperf-nop2p value-volt-nomadperf plutus-nomadperf plutus-nomadperf-nop2p plutus-drep1k-nomadperf plutus-drep2k-nomadperf plutus-drep10k-nomadperf plutus-drep100k-nomadperf plutus24-nomadperf plutus-secp-ecdsa-nomadperf plutus-secp-schnorr-nomadperf plutusv3-blst-nomadperf plutusv3-blst-double-nomadperf plutusv3-blst-half-nomadperf plutus-volt-nomadperf fast-nomadperf fast-nomadperf-nop2p ci-test-nomadperf ci-test-nomadperf-nop2p ci-test-oldtracing-nomadperf default-nomadperf-nop2p default-nomadperf oldtracing-nomadperf oldtracing-nomadperf-nop2p ci-bench-nomadperf ci-bench-nomadperf-nop2p ci-bench-oldtracing-nomadperf value-voting-utxo-volt-nomadperf value-voting-volt-nomadperf value-voting-double-volt-nomadperf plutus-voting-utxo-volt-nomadperf plutus-voting-volt-nomadperf plutus-voting-double-volt-nomadperf latency-nomadperf +PROFILES_NOMAD_PERFSSD := utxoscale-solo-12M16G-nomadperfssd utxoscale-solo-12M64G-nomadperfssd utxoscale-solo-24M64G-nomadperfssd fast-nomadperfssd value-nomadperfssd latency-nomadperfssd LOCAL_PROFILES += $(PROFILES_EMPTY) LOCAL_PROFILES += $(PROFILES_MINIATURE)