
Commit b78a54c

🚨 Format stray python scripts
Problem:
- Quality build is not clean because some Python scripts are not formatted with black.

Solution:
- Format them.
1 parent 383a55b commit b78a54c
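For context, a formatting-only commit like this is normally produced just by running black over the offending scripts. A minimal sketch, assuming black is installed in the development environment (the repository's actual quality-build command is not shown in this commit):

    black test/msg/gen_handler_data.py tools/benchmark/parse_bench_data.py
    black --check .

black --check reports files that would be reformatted and exits non-zero without modifying anything, which is the usual way a CI quality step catches stray unformatted scripts.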

2 files changed: +58 additions, -39 deletions

test/msg/gen_handler_data.py

Lines changed: 21 additions & 16 deletions
@@ -1,35 +1,40 @@
 import random


-big_vals = [int(random.expovariate(10) * (1 << 28)) & 0xffffffff for i in range(0, 100)]
-med_vals = [int(random.expovariate(10) * (1 << 14)) & 0xffff for i in range(0, 50)]
-small_vals = [int(random.expovariate(10) * (1 << 6)) & 0xff for i in range(0, 25)]
+big_vals = [int(random.expovariate(10) * (1 << 28)) & 0xFFFFFFFF for i in range(0, 100)]
+med_vals = [int(random.expovariate(10) * (1 << 14)) & 0xFFFF for i in range(0, 50)]
+small_vals = [int(random.expovariate(10) * (1 << 6)) & 0xFF for i in range(0, 25)]

-combos = [(
-    random.choice(big_vals),
-    random.choice(med_vals),
-    random.choice(small_vals)
-) for i in range(0, 256)]
+combos = [
+    (random.choice(big_vals), random.choice(med_vals), random.choice(small_vals))
+    for i in range(0, 256)
+]


-print("""
+print(
+    """
 template<typename T>
 struct test_project {
     constexpr static auto config = cib::config(
         cib::exports<T>,
-        cib::extend<T>(""")
+        cib::extend<T>("""
+)
 for c in combos:
     print(f"            cb<{c[0]}, {c[1]}, {c[2]}>,")
 print(
-"""        )
+    """        )
     );
 };
-""")
+"""
+)

-print("""
-auto msgs = std::array{""")
+print(
+    """
+auto msgs = std::array{"""
+)
 for c in combos:
     print(f"    m<{c[0]}, {c[1]}, {c[2]}>,")
 print(
-"""    };
-""")
+    """    };
+"""
+)

tools/benchmark/parse_bench_data.py

Lines changed: 37 additions & 23 deletions
@@ -4,24 +4,25 @@
 import csv
 import re

+
 def parse_file(file_path):
     data = {}
     current_dataset = None
     current_algorithm = None
     current_size = None

-    with open(file_path, 'r') as file:
+    with open(file_path, "r") as file:
         for line in file:
             line = line.strip()

-            if line.startswith('dataset:'):
-                current_dataset = line.split(':')[1].strip()
-            elif line.startswith('algorithm:'):
-                current_algorithm = line.split(':')[1].strip()
-            elif line.startswith('size:'):
-                current_size = int(line.split(':')[1].strip())
-            elif line.startswith('|'):
-                fields = line.split('|')[1:]
+            if line.startswith("dataset:"):
+                current_dataset = line.split(":")[1].strip()
+            elif line.startswith("algorithm:"):
+                current_algorithm = line.split(":")[1].strip()
+            elif line.startswith("size:"):
+                current_size = int(line.split(":")[1].strip())
+            elif line.startswith("|"):
+                fields = line.split("|")[1:]
                 if len(fields) >= 5:
                     try:
                         ns_op = float(fields[0].strip())
@@ -34,44 +35,57 @@ def parse_file(file_path):
                             data[current_dataset] = {}
                         if current_algorithm not in data[current_dataset]:
                             data[current_dataset][current_algorithm] = {
-                                'size': current_size
+                                "size": current_size
                             }

                         if "chained" in key_gen_type:
-                            data[current_dataset][current_algorithm]['ns_op_chained'] = ns_op
+                            data[current_dataset][current_algorithm][
+                                "ns_op_chained"
+                            ] = ns_op
                         elif "independent" in key_gen_type:
-                            data[current_dataset][current_algorithm]['ns_op_independent'] = ns_op
-
+                            data[current_dataset][current_algorithm][
+                                "ns_op_independent"
+                            ] = ns_op

     return data


 def generate_csv_tables(data, output_prefix):
     datasets = list(data.keys())
-    algorithms = sorted(set(algo for dataset in data.values() for algo in dataset.keys()))
+    algorithms = sorted(
+        set(algo for dataset in data.values() for algo in dataset.keys())
+    )

     # Table 1: Chained Algorithm Performance (ns/op) vs Dataset
-    with open(f'{output_prefix}_chained_performance.csv', 'w', newline='') as file:
+    with open(f"{output_prefix}_chained_performance.csv", "w", newline="") as file:
         writer = csv.writer(file)
-        writer.writerow(['Dataset'] + algorithms)
+        writer.writerow(["Dataset"] + algorithms)
         for dataset in datasets:
-            row = [dataset] + [data[dataset].get(algo, {}).get('ns_op_chained', '') for algo in algorithms]
+            row = [dataset] + [
+                data[dataset].get(algo, {}).get("ns_op_chained", "")
+                for algo in algorithms
+            ]
             writer.writerow(row)

     # Table 2: Independent Algorithm Performance (ns/op) vs Dataset
-    with open(f'{output_prefix}_independent_performance.csv', 'w', newline='') as file:
+    with open(f"{output_prefix}_independent_performance.csv", "w", newline="") as file:
         writer = csv.writer(file)
-        writer.writerow(['Dataset'] + algorithms)
+        writer.writerow(["Dataset"] + algorithms)
         for dataset in datasets:
-            row = [dataset] + [data[dataset].get(algo, {}).get('ns_op_independent', '') for algo in algorithms]
+            row = [dataset] + [
+                data[dataset].get(algo, {}).get("ns_op_independent", "")
+                for algo in algorithms
+            ]
             writer.writerow(row)

     # Table 3: Algorithm Size (bytes) vs Dataset
-    with open(f'{output_prefix}_algorithm_size.csv', 'w', newline='') as file:
+    with open(f"{output_prefix}_algorithm_size.csv", "w", newline="") as file:
         writer = csv.writer(file)
-        writer.writerow(['Dataset'] + algorithms)
+        writer.writerow(["Dataset"] + algorithms)
         for dataset in datasets:
-            row = [dataset] + [data[dataset].get(algo, {}).get('size', '') for algo in algorithms]
+            row = [dataset] + [
+                data[dataset].get(algo, {}).get("size", "") for algo in algorithms
+            ]
             writer.writerow(row)
