This repository was archived by the owner on Nov 17, 2023. It is now read-only.

Commit c8922fe

Python string formatting (#21136)
* Improve string formatting
* Another formatting fix
* Minor fixes
* Review suggestion
* Review suggestions
* Review suggestions once again
* Review additional suggestions
* A few more fixes
* Remove f-strings from logging functions
* Fix lack of 'f'
* Minor fixes
* Fix lint errors
1 parent bd6405b commit c8922fe
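
The change is mechanical throughout: printf-style '%' formatting and str.format() calls are rewritten as f-strings, with the values moved directly into the braces. A minimal sketch of the pattern, using hypothetical names rather than code from the diff:

    name, count = "conv2d", 3
    print("op %s ran %d times" % (name, count))        # old printf-style formatting
    print("op {} ran {} times".format(name, count))    # old str.format() call
    print(f"op {name} ran {count} times")              # new f-string, same output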

File tree

138 files changed: +835 −928 lines


benchmark/opperf/opperf.py

Lines changed: 3 additions & 3 deletions

@@ -193,9 +193,9 @@ def main():
                         'Valid Inputs - positive integers')
 
     args = parser.parse_args()
-    logging.info("Running MXNet operator benchmarks with the following options: {args}".format(args=args))
+    logging.info(f"Running MXNet operator benchmarks with the following options: {args}")
     assert not os.path.isfile(args.output_file),\
-        "Output file {output_file} already exists.".format(output_file=args.output_file)
+        f"Output file {args.output_file} already exists."
 
     # 2. RUN BENCHMARKS
     ctx = _parse_mxnet_context(args.ctx)

@@ -218,7 +218,7 @@ def main():
     # 4. Generate list of MXNet operators not covered in benchmarks
     ops_not_covered = get_operators_with_no_benchmark(final_benchmark_results.keys())
     for idx, op in enumerate(ops_not_covered):
-        print("{idx}. {op}".format(idx=idx, op=op))
+        print(f"{idx}. {op}")
 
     return 0
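One of the review bullets above mentions removing f-strings from logging calls. The usual alternative is to let the logging module interpolate lazily with %-style arguments, so the message is only built when the record is actually emitted. A sketch with a hypothetical args value, not code from this commit:

    import logging

    args = {"output_file": "results.md"}  # hypothetical stand-in for the parsed arguments
    # Deferred interpolation: formatting is skipped if the record is filtered out
    logging.info("Running MXNet operator benchmarks with the following options: %s", args)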

benchmark/opperf/utils/benchmark_utils.py

Lines changed: 3 additions & 3 deletions

@@ -189,7 +189,7 @@ def _run_operator_performance_test(op, inputs, run_backward, warmup, runs, kwarg
 
     # Run Benchmarks
     op_benchmark_result = {op.__name__: []}
-    logging.info("Begin Benchmark - {name}".format(name=op.__name__))
+    logging.info(f"Begin Benchmark - {op.__name__}")
 
     for idx, kwargs in enumerate(kwargs_list):
         _, profiler_output = benchmark_helper_func(op, runs, **kwargs)

@@ -199,7 +199,7 @@ def _run_operator_performance_test(op, inputs, run_backward, warmup, runs, kwarg
             new_inp = parse_input_ndarray(inputs[idx])
             profiler_output = merge_map_list([{"inputs": new_inp}] + [profiler_output])
         op_benchmark_result[op.__name__].append(profiler_output)
-    logging.info("Complete Benchmark - {name}".format(name=op.__name__))
+    logging.info(f"Complete Benchmark - {op.__name__}")
     return op_benchmark_result
 

@@ -250,7 +250,7 @@ def run_performance_test(ops, inputs, run_backward=True,
            kwargs_list = _prepare_op_inputs(inputs, run_backward, dtype, ctx, op.__module__)
            benchmark_result = _run_operator_performance_test(op, inputs, run_backward, warmup, runs, kwargs_list, profiler)
        else:
-           raise ValueError("Unknown {0} operator provided to benchmark. - {1}".format(op.__module__, op.__name__))
+           raise ValueError(f"Unknown {op.__module__} operator provided to benchmark. - {op.__name__}")
        op_benchmark_result.append(benchmark_result)
    return op_benchmark_result
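Attribute lookups such as op.__name__ and op.__module__ can sit directly inside the braces. A tiny sketch with a hypothetical class standing in for an operator:

    class _FakeOp:          # hypothetical stand-in for an MXNet operator
        pass

    op = _FakeOp
    print(f"Begin Benchmark - {op.__name__}")   # -> Begin Benchmark - _FakeOp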

benchmark/opperf/utils/common_utils.py

Lines changed: 4 additions & 5 deletions

@@ -76,7 +76,7 @@ def save_to_file(inp_dict, out_filepath, out_format='json', runtime_features=Non
         with open(out_filepath, "w") as result_file:
             result_file.write(_prepare_markdown(inp_dict, runtime_features, profiler))
     else:
-        raise ValueError("Invalid output file format provided - '{}'. Supported - json, md".format(format))
+        raise ValueError(f"Invalid output file format provided - '{out_format}'. Supported - json, md")
 
 
 def get_json(inp_dict):

@@ -126,10 +126,9 @@ def _prepare_op_benchmark_result(op, op_bench_result, profiler):
 
     result = ""
     if profiler == "native":
-        result = "| {} | {} | {} | {} | {} |".format(operator_name,
-                                                     inputs, max_mem_usage, avg_forward_time, avg_backward_time)
+        result = f"| {operator_name} | {inputs} | {max_mem_usage} | {avg_forward_time} | {avg_backward_time} |"
     elif profiler == "python":
-        result = "| {} | {} | {} | {} | {} | {} |".format(operator_name, avg_time, p50_time, p90_time, p99_time, inputs)
+        result = f"| {operator_name} | {avg_time} | {p50_time} | {p90_time} | {p99_time} | {inputs} |"
     return result
 

@@ -139,7 +138,7 @@ def _prepare_markdown(results, runtime_features=None, profiler='native'):
         results_markdown.append("# Runtime Features")
         idx = 0
         for key, value in runtime_features['runtime_features'].items():
-            results_markdown.append('{}. {} : {}'.format(idx, key, value))
+            results_markdown.append(f'{idx}. {key} : {value}')
 
     results_markdown.append("# Benchmark Results")
     if profiler == 'native':
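
Besides switching to an f-string, the save_to_file change also fixes what the error message reports: the old call interpolated the name format, which resolves to the built-in function here (the parameter is named out_format), so the requested format never appeared in the message. A small illustration, not part of the commit:

    out_format = "csv"
    "Invalid output file format provided - '{}'.".format(format)   # -> "... '<built-in function format>'."
    f"Invalid output file format provided - '{out_format}'."       # -> "... 'csv'."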

benchmark/python/control_flow/rnn.py

Lines changed: 4 additions & 4 deletions

@@ -97,7 +97,8 @@ def run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim):
         layer.initialize(ctx=ctx)
         if is_hyb_layer:
             layer.hybridize(static_alloc=True)
-        print("is_train = %r, hybridize_cell = %r, hybridize_layer = %r" % (is_train, is_hyb_cell, is_hyb_layer))
+        print(
+            f"is_train = {repr(is_train)}, hybridize_cell = {repr(is_hyb_cell)}, hybridize_layer = {repr(is_hyb_layer)}")
         times = []
         for _ in range(args.warmup_rounds + args.test_rounds):
             tick = time()

@@ -112,7 +113,7 @@ def run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim):
             tock = time()
             times.append((tock - tick) * 1000.0)
         times = times[args.warmup_rounds: ]
-        print("Time used: mean = %.3f ms, std = %.3f ms" % (onp.mean(times), onp.std(times)))
+        print(f"Time used: mean = {onp.mean(times):.3f} ms, std = {onp.std(times):.3f} ms")
 
 
 def main():

@@ -131,8 +132,7 @@ def main():
     for cell_type, ctx, seq_len, batch_size, hidden_dim in product( \
             cell_types, ctxs, seq_lens, batch_sizes, hidden_dims):
         print("--------------------------------------")
-        print("cell: %s ctx: %s length: %d batch size: %d dim: %d" % \
-              (cell_type.__name__, str(ctx), seq_len, batch_size, hidden_dim))
+        print(f"cell: {cell_type.__name__} ctx: {str(ctx)} length: {seq_len} batch size: {batch_size} dim: {hidden_dim}")
         run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim)
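Here the %r conversions become explicit repr() calls inside the braces. An equivalent, slightly more compact spelling would be the !r conversion flag (a sketch, not what the commit chose):

    is_train = True
    print(f"is_train = {is_train!r}")   # same output as f"is_train = {repr(is_train)}"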

benchmark/python/dnnl/fc_add.py

Lines changed: 2 additions & 2 deletions

@@ -59,9 +59,9 @@ def print_header(header):
 
 
 def print_value(shape, hidden, mean):
     if table_left_colums:
-        print("| ({:4},{:4}) | {:6} | {:9.3f} |".format(shape[0], shape[1], hidden, mean))
+        print(f"| ({shape[0]:4},{shape[1]:4}) | {hidden:6} | {mean:9.3f} |")
     else:
-        print(" {:9.3f} |".format(mean))
+        print(f" {mean:9.3f} |")
 
 
 def measure(net, data0, data1, data2, shape, nhid):
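
Width and precision specs such as :4, :6, and :9.3f move across unchanged; they still follow the colon, only the value moves in front of it. A minimal sketch with hypothetical values:

    shape, hidden, mean = (64, 128), 256, 3.14159
    print(f"| ({shape[0]:4},{shape[1]:4}) | {hidden:6} | {mean:9.3f} |")
    # prints: | (  64, 128) |    256 |     3.142 |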

benchmark/python/einsum/benchmark_einsum.py

Lines changed: 10 additions & 10 deletions

@@ -38,48 +38,48 @@ def test_np_einsum():
     a = np.ones(64).reshape(2,4,8)
     args = ['ijk,ilm,njm,nlk,abc->', a, a, a, a, a]
     cost = measure_cost(500, np.einsum, *args)
-    print("Basic einsum: {} ms".format(cost * 1000))
+    print(f"Basic einsum: {cost * 1000} ms")
 
     # Sub-optimal einsum
     # cost = measure_cost(500, np.einsum, *args, optimize='optimal')
     # print("Optimal einsum: {} ms".format(cost * 1000))
 
     # Greedy einsum
     cost = measure_cost(500, np.einsum, *args, optimize=True)
-    print("Greedy einsum: {} ms".format(cost * 1000))
+    print(f"Greedy einsum: {cost * 1000} ms")
 
     print("RNN Use Case:")
     a = np.random.uniform(0, 1, size=(64, 128, 512))
     b = np.random.uniform(0, 1, size=(128, 512, 2, 2))
     args = ['bij, ijkl->bkl', a, b]
     cost = measure_cost(2, np.einsum, *args, optimize=True)
-    print('Greedy einsum: {} ms'.format(cost * 1000))
+    print(f'Greedy einsum: {cost * 1000} ms')
     cost = measure_cost(2, np.einsum, *args)
-    print('Basic einsum: {} ms'.format(cost * 1000))
+    print(f'Basic einsum: {cost * 1000} ms')
 
     print('Inner Product:')
     a = np.ones(6000000)
     b = np.ones(6000000)
     args = [a, b]
     cost = measure_cost(50, np.tensordot, *args, axes=([0],[0]))
-    print('Tensordot: {} ms'.format(cost * 1000))
+    print(f'Tensordot: {cost * 1000} ms')
     args = ['i, i', a, b]
     cost = measure_cost(50, np.einsum, *args, optimize=True)
-    print('Greedy einsum: {} ms'.format(cost * 1000))
+    print(f'Greedy einsum: {cost * 1000} ms')
     cost = measure_cost(50, np.einsum, *args)
-    print('Basic einsum: {} ms'.format(cost * 1000))
+    print(f'Basic einsum: {cost * 1000} ms')
 
     print('Matrix Product:')
     a = np.ones(600000).reshape(200, 3000)
     b = np.ones(600000).reshape(3000, 200)
     args = [a, b]
     cost = measure_cost(50, np.tensordot, *args, axes=([1],[0]))
-    print('Tensordot: {} ms'.format(cost * 1000))
+    print(f'Tensordot: {cost * 1000} ms')
     args = ['ij, jk', a, b]
     cost = measure_cost(50, np.einsum, *args, optimize=True)
-    print('Greedy einsum: {} ms'.format(cost * 1000))
+    print(f'Greedy einsum: {cost * 1000} ms')
     cost = measure_cost(50, np.einsum, *args)
-    print('Basic einsum: {} ms'.format(cost * 1000))
+    print(f'Basic einsum: {cost * 1000} ms')
 
 
 if __name__ == "__main__":
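The braces accept arbitrary expressions, so the cost * 1000 arithmetic moves straight into the literal. A short sketch with a hypothetical timing value:

    cost = 0.00042                                # hypothetical cost in seconds
    print(f"Basic einsum: {cost * 1000} ms")      # expression evaluated in place
    print(f"Basic einsum: {cost * 1000:.3f} ms")  # optionally rounded with a format spec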

benchmark/python/ffi/benchmark_ffi.py

Lines changed: 5 additions & 5 deletions

@@ -31,7 +31,7 @@ def add_workload(funcname, *args, **kwargs):
     _specifier = kwargs["_specififer"]
     del kwargs["_specififer"]
     if _specifier in OpArgMngr.args:
-        raise ValueError("duplicate {}".format(_specifier))
+        raise ValueError(f"duplicate {_specifier}")
     OpArgMngr.args[_specifier] = {'args': args, 'kwargs': kwargs, 'funcname': funcname}
 

@@ -43,7 +43,7 @@ def generate_workloads():
     for shape in shapes:
         name = 'x'.join(str(i) for i in shape)
         if name in array_pool:
-            raise ValueError("duplicate array {}".format(name))
+            raise ValueError(f"duplicate array {name}")
         array_pool[name] = dnp.ones(shape)
     return array_pool
 
@@ -229,7 +229,7 @@ def run_benchmark(packages):
     for (k, v) in OpArgMngr.args.items():
         result = {}
         for (name, package) in packages.items():
-            print('{}.{} running...'.format(name, k))
+            print(f'{name}.{k} running...')
            op = get_op(package["module"], v["funcname"])
            args = [package["data"](arg) for arg in v["args"]]
            kwargs = {k: package["data"](v) for (k, v) in v["kwargs"].items()}

@@ -240,10 +240,10 @@ def run_benchmark(packages):
 
 
 def show_results(results):
-    print("{:>24}{:>24}{:>24}".format("name", "package", "time(us)"))
+    print(f'{"name":>24}{"package":>24}{"time(us)":>24}')
     for (specifier, d) in results.items():
         for (k, v) in d.items():
-            print("{:>24}{:>24}{:>24}".format(specifier, k, v * 10 ** 6))
+            print(f"{specifier:>24}{k:>24}{v * 10 ** 6:>24}")
 
 
 if __name__ == "__main__":
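show_results keeps the :>24 right-alignment specs; note that string literals inside the braces use a different quote character from the one delimiting the f-string, which is required before Python 3.12. A small sketch with made-up values:

    print(f'{"name":>24}{"package":>24}{"time(us)":>24}')
    print(f"{'add':>24}{'mxnet.np':>24}{12.5:>24}")
    # each field is right-aligned in a 24-character column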

benchmark/python/metric/benchmark_metric.py

Lines changed: 6 additions & 10 deletions

@@ -75,12 +75,10 @@ def run_metric(name, data_gen_cls, i, n, c, pred_ctx, label_ctx, **kwargs):
         metric.update([label] * i, [pred] * i)
         mx.nd.waitall()
         elapsed = time.time() - before
-        elapsed_str = "{:<.5}".format(elapsed)
+        elapsed_str = f"{elapsed:<.5}"
     except mx.MXNetError:
         elapsed_str = "FAILED"
-    print("{metric:<15}{pctx:<10}{lctx:<12}{niter:<12}{bs:<15}{out_dim:<15}{elapsed:<}".format(
-        metric=name, pctx=str(pred_ctx), lctx=str(label_ctx), niter=i * n, bs=data_gen.batch_size,
-        out_dim=data_gen.output_dim, elapsed=elapsed_str), file=sys.stderr)
+    print(f"{name:<15}{pred_ctx:<10}{label_ctx:<12}{i * n:<12}{data_gen.batch_size:<15}{data_gen.output_dim:<15}{elapsed_str:<}", file=sys.stderr)
 
 
 def test_metric_performance():

@@ -107,14 +105,12 @@ def test_metric_performance():
 
     print("\nmx.gluon.metric benchmarks", file=sys.stderr)
     print(
-        "{:15}{:10}{:12}{:12}{:15}{:15}{}".format(
-            'Metric', 'Data-Ctx', 'Label-Ctx', 'Data Size', 'Batch Size', 'Output Dim', 'Elapsed Time'),
+        f"{'Metric':15}{'Data-Ctx':10}{'Label-Ctx':12}{'Data Size':12}{'Batch Size':15}{'Output Dim':15}{'Elapsed Time'}",
         file=sys.stderr)
-    print("{:-^90}".format(''), file=sys.stderr)
+    print(f"{'':-^90}", file=sys.stderr)
     for k, v in metrics:
         for c in output_dims:
             for n in batch_sizes:
                 for pred_ctx, label_ctx in itertools.product(ctxs, ctxs):
-                    run_metric(k, v[1], (data_size * 128)//(n * c), n, c, pred_ctx, label_ctx, **v[0])
-        print("{:-^90}".format(''), file=sys.stderr)
-
+                    run_metric(k, v[1], (data_size * 128)//(n * c), n, c, pred_ctx, label_ctx, **v[0])
+        print(f"{'':-^90}", file=sys.stderr)

benchmark/python/quantization/benchmark_op.py

Lines changed: 4 additions & 5 deletions

@@ -61,11 +61,10 @@ def benchmark_convolution(data_shape, kernel, num_filter, pad, stride, no_bias=T
                                  grad_req='null', typ='forward') * 1000
 
    print('==================================================================================================')
-   print('data=%s, kernel=%s, num_filter=%s, pad=%s, stride=%s, no_bias=%s, layout=%s, repeats=%s'
-         % (data_shape, kernel, num_filter, pad, stride, no_bias, layout, repeats))
-   print('%s , ctx=%s, time=%.2f ms' % (conv_cudnn.name + '-FP32', ctx_gpu, conv_cudnn_time))
-   print('%s, ctx=%s, time=%.2f ms' % (quantized_conv2d.name, ctx_gpu, qconv_time))
-   print('quantization speedup: %.1fX' % (conv_cudnn_time / qconv_time))
+   print(f'data={data_shape}, kernel={kernel}, num_filter={num_filter}, pad={pad}, stride={stride}, no_bias={no_bias}, layout={layout}, repeats={repeats}')
+   print(f'{conv_cudnn.name}-FP32 , ctx={ctx_gpu}, time={conv_cudnn_time:.2f} ms')
+   print(f'{quantized_conv2d.name}, ctx={ctx_gpu}, time={qconv_time:.2f} ms')
+   print(f'quantization speedup: {conv_cudnn_time / qconv_time:.1f}X')
    print('\n')
 
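The printf conversions map directly onto format specs: %.2f becomes :.2f after the expression and %.1f becomes :.1f. A short sketch with hypothetical timings:

    conv_time, qconv_time = 1.237, 0.512               # hypothetical timings in ms
    print('time=%.2f ms' % conv_time)                  # old printf conversion
    print(f'time={conv_time:.2f} ms')                  # same precision spec after the colon
    print(f'quantization speedup: {conv_time / qconv_time:.1f}X')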

benchmark/python/sparse/cast_storage.py

Lines changed: 3 additions & 3 deletions

@@ -51,7 +51,7 @@ def dense_to_sparse(m, n, density, ctx, repeat, stype):
 
     # start benchmarking
     cost = measure_cost(repeat, mx.nd.cast_storage, dns_data, stype)
-    results = '{:10.1f} {:>10} {:8d} {:8d} {:10.2f}'.format(density*100, str(ctx), m, n, cost*1000)
+    results = f'{density*100:10.1f} {str(ctx):>10} {m:8d} {n:8d} {cost * 1000:10.2f}'
     print(results)
 
 check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))

@@ -82,10 +82,10 @@ def dense_to_sparse(m, n, density, ctx, repeat, stype):
            stype = 'row_sparse'
            print(" cast_storage benchmark: dense to rsp, size m x n ")
        else:
-           print("invalid benchmark: %s" %b)
+           print(f"invalid benchmark: {b}")
            continue
        print("==================================================")
-       headline = '{:>10} {:>10} {:>8} {:>8} {:>10}'.format('density(%)', 'context', 'm', 'n', 'time(ms)')
+       headline = f"{'density(%)':>10} {'context':>10} {'m':>8} {'n':>8} {'time(ms)':>10}"
        print(headline)
        for i in range(len(n)):
            for ctx in contexts:
