Skip to content

Commit 3d42a25

Browse files
committed
Update logs
1 parent 4d8d5f6 commit 3d42a25

File tree

7 files changed

+184
-24
lines changed

7 files changed

+184
-24
lines changed

scripts/print_aggregated_errors.py

Lines changed: 39 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import os
22
import json
3+
import sys
34

45
ERROR_DIR = "test_output" # Directory where error files are stored
56

@@ -28,13 +29,22 @@ def load_aggregated_error_file():
2829
return {}
2930

3031
def print_error_summary():
31-
print("\n\n📊 Global Error Summary:\n")
32+
print("\n\n📊 Error Breakdown by Node:\n")
3233

34+
# Check if a specific node is being tested
35+
node_to_test = os.getenv("NODE_TO_TEST")
36+
3337
# Try individual files first (preferred method)
3438
error_files = load_error_files()
3539

3640
if error_files:
3741
print("📁 Using individual node error files:")
42+
43+
# If a specific node is being tested, only show that node's errors
44+
if node_to_test:
45+
target_filename = f"errors_{node_to_test.replace(' ', '_')}.json"
46+
error_files = [f for f in error_files if f == target_filename]
47+
3848
for filename in sorted(error_files):
3949
node_name = filename.replace("errors_", "").replace(".json", "").replace("_", " ")
4050
print(f"🔧 {node_name}")
@@ -63,18 +73,36 @@ def print_error_summary():
6373
print("✅ No error data found")
6474
return
6575

66-
for node_name in sorted(error_data.keys()):
67-
print(f"🔧 {node_name}")
68-
69-
node_errors = error_data[node_name]
70-
71-
if not node_errors:
72-
print(" ✅ No errors")
76+
# If a specific node is being tested, only show that node's errors
77+
if node_to_test:
78+
if node_to_test in error_data:
79+
print(f"🔧 {node_to_test}")
80+
node_errors = error_data[node_to_test]
81+
82+
if not node_errors:
83+
print(" ✅ No errors")
84+
else:
85+
for error_key, count in node_errors.items():
86+
print(f" • {count}x {error_key}")
87+
print()
7388
else:
74-
for error_key, count in node_errors.items():
75-
print(f" • {count}x {error_key}")
89+
print(f"🔧 {node_to_test}")
90+
print(" ✅ No errors")
91+
print()
92+
else:
93+
# Show all nodes
94+
for node_name in sorted(error_data.keys()):
95+
print(f"🔧 {node_name}")
7696

77-
print()
97+
node_errors = error_data[node_name]
98+
99+
if not node_errors:
100+
print(" ✅ No errors")
101+
else:
102+
for error_key, count in node_errors.items():
103+
print(f" • {count}x {error_key}")
104+
105+
print()
78106

79107
if __name__ == "__main__":
80108
print_error_summary()

tests/mainnet/Base_Mainnet.py

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,16 @@ def log_error(error, node_name, step='unknown', remote_node=None):
105105
else:
106106
key = f"{step}{type(error).__name__}: {error_message}"
107107

108-
# Store errors in a temporary file to ensure persistence across test session
108+
# Store errors in the global error_stats for this node
109+
if node_name not in error_stats:
110+
error_stats[node_name] = {}
111+
112+
if key in error_stats[node_name]:
113+
error_stats[node_name][key] += 1
114+
else:
115+
error_stats[node_name][key] = 1
116+
117+
# Also store in a temporary file to ensure persistence across test session
109118
error_file = "test_output/error_stats.json"
110119
os.makedirs("test_output", exist_ok=True)
111120

@@ -266,8 +275,21 @@ def avg(times): return round(sum(times) / len(times), 2) if times else 0.0
266275
os.makedirs("test_output", exist_ok=True)
267276
with open(f"test_output/summary_{name.replace(' ', '_')}.json", "w") as f:
268277
json.dump(summary, f, indent=2)
278+
279+
# Write error file by merging with existing errors from the aggregated file
280+
error_file = "test_output/error_stats.json"
281+
if os.path.exists(error_file):
282+
with open(error_file, 'r') as f:
283+
try:
284+
all_errors = json.load(f)
285+
node_errors = all_errors.get(name, {})
286+
except:
287+
node_errors = {}
288+
else:
289+
node_errors = {}
290+
269291
with open(f"test_output/errors_{name.replace(' ', '_')}.json", "w") as f:
270-
json.dump(error_stats.get(name, {}), f, indent=2)
292+
json.dump(node_errors, f, indent=2)
271293

272294
print(f"\n──────────── Summary for {name} ────────────")
273295
if failed_assets:

tests/mainnet/Gnosis_Mainnet.py

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,16 @@ def log_error(error, node_name, step='unknown', remote_node=None):
105105
else:
106106
key = f"{step}{type(error).__name__}: {error_message}"
107107

108-
# Store errors in a temporary file to ensure persistence across test session
108+
# Store errors in the global error_stats for this node
109+
if node_name not in error_stats:
110+
error_stats[node_name] = {}
111+
112+
if key in error_stats[node_name]:
113+
error_stats[node_name][key] += 1
114+
else:
115+
error_stats[node_name][key] = 1
116+
117+
# Also store in a temporary file to ensure persistence across test session
109118
error_file = "test_output/error_stats.json"
110119
os.makedirs("test_output", exist_ok=True)
111120

@@ -266,8 +275,21 @@ def avg(times): return round(sum(times) / len(times), 2) if times else 0.0
266275
os.makedirs("test_output", exist_ok=True)
267276
with open(f"test_output/summary_{name.replace(' ', '_')}.json", "w") as f:
268277
json.dump(summary, f, indent=2)
278+
279+
# Write error file by merging with existing errors from the aggregated file
280+
error_file = "test_output/error_stats.json"
281+
if os.path.exists(error_file):
282+
with open(error_file, 'r') as f:
283+
try:
284+
all_errors = json.load(f)
285+
node_errors = all_errors.get(name, {})
286+
except:
287+
node_errors = {}
288+
else:
289+
node_errors = {}
290+
269291
with open(f"test_output/errors_{name.replace(' ', '_')}.json", "w") as f:
270-
json.dump(error_stats.get(name, {}), f, indent=2)
292+
json.dump(node_errors, f, indent=2)
271293

272294
print(f"\n──────────── Summary for {name} ────────────")
273295
if failed_assets:

tests/mainnet/Neuroweb_Mainnet.py

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,16 @@ def log_error(error, node_name, step='unknown', remote_node=None):
105105
else:
106106
key = f"{step}{type(error).__name__}: {error_message}"
107107

108-
# Store errors in a temporary file to ensure persistence across test session
108+
# Store errors in the global error_stats for this node
109+
if node_name not in error_stats:
110+
error_stats[node_name] = {}
111+
112+
if key in error_stats[node_name]:
113+
error_stats[node_name][key] += 1
114+
else:
115+
error_stats[node_name][key] = 1
116+
117+
# Also store in a temporary file to ensure persistence across test session
109118
error_file = "test_output/error_stats.json"
110119
os.makedirs("test_output", exist_ok=True)
111120

@@ -266,8 +275,21 @@ def avg(times): return round(sum(times) / len(times), 2) if times else 0.0
266275
os.makedirs("test_output", exist_ok=True)
267276
with open(f"test_output/summary_{name.replace(' ', '_')}.json", "w") as f:
268277
json.dump(summary, f, indent=2)
278+
279+
# Write error file by merging with existing errors from the aggregated file
280+
error_file = "test_output/error_stats.json"
281+
if os.path.exists(error_file):
282+
with open(error_file, 'r') as f:
283+
try:
284+
all_errors = json.load(f)
285+
node_errors = all_errors.get(name, {})
286+
except:
287+
node_errors = {}
288+
else:
289+
node_errors = {}
290+
269291
with open(f"test_output/errors_{name.replace(' ', '_')}.json", "w") as f:
270-
json.dump(error_stats.get(name, {}), f, indent=2)
292+
json.dump(node_errors, f, indent=2)
271293

272294
print(f"\n──────────── Summary for {name} ────────────")
273295
if failed_assets:

tests/testnet/Base_Testnet.py

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,16 @@ def log_error(error, node_name, step='unknown', remote_node=None):
112112
else:
113113
key = f"{step}{type(error).__name__}: {error_message}"
114114

115-
# Store errors in a temporary file to ensure persistence across test session
115+
# Store errors in the global error_stats for this node
116+
if node_name not in error_stats:
117+
error_stats[node_name] = {}
118+
119+
if key in error_stats[node_name]:
120+
error_stats[node_name][key] += 1
121+
else:
122+
error_stats[node_name][key] = 1
123+
124+
# Also store in a temporary file to ensure persistence across test session
116125
error_file = "test_output/error_stats.json"
117126
os.makedirs("test_output", exist_ok=True)
118127

@@ -273,8 +282,21 @@ def avg(times): return round(sum(times) / len(times), 2) if times else 0.0
273282
os.makedirs("test_output", exist_ok=True)
274283
with open(f"test_output/summary_{name.replace(' ', '_')}.json", "w") as f:
275284
json.dump(summary, f, indent=2)
285+
286+
# Write error file by merging with existing errors from the aggregated file
287+
error_file = "test_output/error_stats.json"
288+
if os.path.exists(error_file):
289+
with open(error_file, 'r') as f:
290+
try:
291+
all_errors = json.load(f)
292+
node_errors = all_errors.get(name, {})
293+
except:
294+
node_errors = {}
295+
else:
296+
node_errors = {}
297+
276298
with open(f"test_output/errors_{name.replace(' ', '_')}.json", "w") as f:
277-
json.dump(error_stats.get(name, {}), f, indent=2)
299+
json.dump(node_errors, f, indent=2)
278300

279301
print(f"\n──────────── Summary for {name} ────────────")
280302
if failed_assets:

tests/testnet/Gnosis_Testnet.py

Lines changed: 25 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,16 @@ def log_error(error, node_name, step='unknown', remote_node=None):
112112
else:
113113
key = f"{step}{type(error).__name__}: {error_message}"
114114

115-
# Store errors in a temporary file to ensure persistence across test session
115+
# Store errors in the global error_stats for this node
116+
if node_name not in error_stats:
117+
error_stats[node_name] = {}
118+
119+
if key in error_stats[node_name]:
120+
error_stats[node_name][key] += 1
121+
else:
122+
error_stats[node_name][key] = 1
123+
124+
# Also store in a temporary file to ensure persistence across test session
116125
error_file = "test_output/error_stats.json"
117126
os.makedirs("test_output", exist_ok=True)
118127

@@ -164,7 +173,7 @@ def run_test_for_node(node, index):
164173
publish_times, query_times, local_get_times, remote_get_times = [], [], [], []
165174
failed_assets = []
166175

167-
for i in range(10):
176+
for i in range(1):
168177
print(f"\n📡 Publishing KA #{i + 1} on {name}")
169178
content = get_random_content(name)
170179
ual = None
@@ -273,8 +282,21 @@ def avg(times): return round(sum(times) / len(times), 2) if times else 0.0
273282
os.makedirs("test_output", exist_ok=True)
274283
with open(f"test_output/summary_{name.replace(' ', '_')}.json", "w") as f:
275284
json.dump(summary, f, indent=2)
285+
286+
# Write error file by merging with existing errors from the aggregated file
287+
error_file = "test_output/error_stats.json"
288+
if os.path.exists(error_file):
289+
with open(error_file, 'r') as f:
290+
try:
291+
all_errors = json.load(f)
292+
node_errors = all_errors.get(name, {})
293+
except:
294+
node_errors = {}
295+
else:
296+
node_errors = {}
297+
276298
with open(f"test_output/errors_{name.replace(' ', '_')}.json", "w") as f:
277-
json.dump(error_stats.get(name, {}), f, indent=2)
299+
json.dump(node_errors, f, indent=2)
278300

279301
print(f"\n──────────── Summary for {name} ────────────")
280302
if failed_assets:

tests/testnet/Neuroweb_Testnet.py

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,16 @@ def log_error(error, node_name, step='unknown', remote_node=None):
112112
else:
113113
key = f"{step}{type(error).__name__}: {error_message}"
114114

115-
# Store errors in a temporary file to ensure persistence across test session
115+
# Store errors in the global error_stats for this node
116+
if node_name not in error_stats:
117+
error_stats[node_name] = {}
118+
119+
if key in error_stats[node_name]:
120+
error_stats[node_name][key] += 1
121+
else:
122+
error_stats[node_name][key] = 1
123+
124+
# Also store in a temporary file to ensure persistence across test session
116125
error_file = "test_output/error_stats.json"
117126
os.makedirs("test_output", exist_ok=True)
118127

@@ -273,8 +282,21 @@ def avg(times): return round(sum(times) / len(times), 2) if times else 0.0
273282
os.makedirs("test_output", exist_ok=True)
274283
with open(f"test_output/summary_{name.replace(' ', '_')}.json", "w") as f:
275284
json.dump(summary, f, indent=2)
285+
286+
# Write error file by merging with existing errors from the aggregated file
287+
error_file = "test_output/error_stats.json"
288+
if os.path.exists(error_file):
289+
with open(error_file, 'r') as f:
290+
try:
291+
all_errors = json.load(f)
292+
node_errors = all_errors.get(name, {})
293+
except:
294+
node_errors = {}
295+
else:
296+
node_errors = {}
297+
276298
with open(f"test_output/errors_{name.replace(' ', '_')}.json", "w") as f:
277-
json.dump(error_stats.get(name, {}), f, indent=2)
299+
json.dump(node_errors, f, indent=2)
278300

279301
print(f"\n──────────── Summary for {name} ────────────")
280302
if failed_assets:

0 commit comments

Comments (0)