           git gc
      - name: Building Python and running pyperformance
        run: |
-          python workflow_bootstrap.py ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} ${{ inputs.benchmarks || 'all' }} "${{ env.flags }}" ${{ inputs.force && '--force' || '' }} ${{ inputs.pgo && '--pgo' || '' }} --run_id ${{ github.run_id }}
+          python workflow_bootstrap.py ${{ inputs.fork }} ${{ inputs.ref }} `
+            ${{ (inputs.machine == 'all' || inputs.machine == '__really_all') && inputs.machine || '$env:BENCHMARK_RUNNER_NAME' }} `
+            ${{ inputs.benchmarks || 'all' }} "${{ env.flags }}" ${{ inputs.force && '--force' || '' }} ${{ inputs.pgo && '--pgo' || '' }} --run_id ${{ github.run_id }}
       # Pull again, since another job may have committed results in the meantime
      - name: Pull benchmarking
        run: |
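The new machine argument relies on the GitHub Actions expression idiom `${{ cond && a || b }}`, which behaves like a ternary: when `inputs.machine` is `all` or `__really_all` the value is forwarded as-is, otherwise the literal env-var reference is emitted so each runner substitutes its own name at run time. A minimal sketch of the idiom as a standalone step (the step name and echoed strings are illustrative, not from this workflow); the usual caveat applies that `||` also falls through when the left branch is falsy (`''`, `false`, `0`):

      - name: Demonstrate the &&/|| ternary idiom (illustrative)
        run: |
          # Prints the first string only when machine == 'all'; otherwise the second.
          echo "${{ inputs.machine == 'all' && 'fan out to every runner' || 'target one named machine' }}"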
@@ -112,7 +114,9 @@ jobs:
           python-version: "3.11"
      - name: Building Python and running pyperformance
        run: |
-          python workflow_bootstrap.py ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} ${{ inputs.benchmarks || 'all' }} ${{ env.flags }} ${{ inputs.force && '--force' || '' }} ${{ inputs.pgo && '--pgo' || '' }} ${{ inputs.perf && '--perf' || '' }} --run_id ${{ github.run_id }}
+          python workflow_bootstrap.py ${{ inputs.fork }} ${{ inputs.ref }} \
+            ${{ (inputs.machine == 'all' || inputs.machine == '__really_all') && inputs.machine || '"$BENCHMARK_RUNNER_NAME"' }} \
+            ${{ inputs.benchmarks || 'all' }} ${{ env.flags }} ${{ inputs.force && '--force' || '' }} ${{ inputs.pgo && '--pgo' || '' }} ${{ inputs.perf && '--perf' || '' }} --run_id ${{ github.run_id }}
       # Pull again, since another job may have committed results in the meantime
      - name: Pull benchmarking
        if: ${{ !inputs.perf }}
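The two variants above differ only in shell syntax: the first hunk runs on the Windows job, so it continues lines with PowerShell's backtick and reads the variable as `$env:BENCHMARK_RUNNER_NAME`, while this hunk's bash job continues with a trailing backslash and quotes `"$BENCHMARK_RUNNER_NAME"` to survive word splitting. A hedged side-by-side sketch (step names illustrative, not part of this workflow):

      - name: PowerShell continuation and env-var syntax (illustrative)
        shell: pwsh
        run: |
          echo `
            $env:BENCHMARK_RUNNER_NAME

      - name: bash continuation and env-var syntax (illustrative)
        shell: bash
        run: |
          echo \
            "$BENCHMARK_RUNNER_NAME"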
@@ -154,7 +158,9 @@ jobs:
           git gc
      - name: Building Python and running pyperformance
        run: |
-          python3 workflow_bootstrap.py ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} ${{ inputs.benchmarks || 'all' }} ${{ env.flags }} ${{ inputs.force && '--force' || '' }} ${{ inputs.pgo && '--pgo' || '' }} --run_id ${{ github.run_id }}
+          python3 workflow_bootstrap.py ${{ inputs.fork }} ${{ inputs.ref }} \
+            ${{ (inputs.machine == 'all' || inputs.machine == '__really_all') && inputs.machine || '"$BENCHMARK_RUNNER_NAME"' }} \
+            ${{ inputs.benchmarks || 'all' }} ${{ env.flags }} ${{ inputs.force && '--force' || '' }} ${{ inputs.pgo && '--pgo' || '' }} --run_id ${{ github.run_id }}
       # Pull again, since another job may have committed results in the meantime
      - name: Pull benchmarking
        run: |
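All three jobs now assume each self-hosted runner exposes a `BENCHMARK_RUNNER_NAME` environment variable naming itself. The diff does not show where that variable is defined; one plausible arrangement (an assumption, not confirmed here) is the runner's `.env` file, or a job-level `env:` block when testing:

      # Hypothetical job-level override for local testing; on real self-hosted
      # runners the variable would more likely live in the runner's .env file.
      env:
        BENCHMARK_RUNNER_NAME: linux-amd64-1   # illustrative runner name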