Skip to content

Commit 33d5064

Browse files
committed
Update aggregate
1 parent 4e3bcf8 commit 33d5064

File tree

1 file changed

+78
-8
lines changed

1 file changed

+78
-8
lines changed

scripts/print_aggregated_errors.py

Lines changed: 78 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,8 @@ def create_aggregated_error_file():
1616
"""Create aggregated error_stats.json from individual node files"""
1717
aggregated_errors = {}
1818

19-
# Define all possible nodes
19+
# Determine which nodes to check based on environment
20+
# Check if we're in a mainnet or testnet context by looking at existing files
2021
testnet_nodes = [
2122
"Node 01", "Node 04", "Node 05", "Node 06", "Node 07", "Node 08",
2223
"Node 09", "Node 10", "Node 13", "Node 14", "Node 21", "Node 23", "Node 37"
@@ -26,25 +27,45 @@ def create_aggregated_error_file():
2627
"Node 25", "Node 26", "Node 27", "Node 28", "Node 29", "Node 30"
2728
]
2829

29-
all_nodes = testnet_nodes + mainnet_nodes
30+
# Check which type of nodes have error files to determine context
31+
testnet_files_exist = any(os.path.exists(os.path.join(ERROR_DIR, f"errors_{node.replace(' ', '_')}.json")) for node in testnet_nodes)
32+
mainnet_files_exist = any(os.path.exists(os.path.join(ERROR_DIR, f"errors_{node.replace(' ', '_')}.json")) for node in mainnet_nodes)
33+
34+
# Determine which nodes to process
35+
if mainnet_files_exist and not testnet_files_exist:
36+
# Mainnet context
37+
nodes_to_check = mainnet_nodes
38+
print(f"🔍 Mainnet context detected - checking {len(nodes_to_check)} mainnet nodes")
39+
elif testnet_files_exist and not mainnet_files_exist:
40+
# Testnet context
41+
nodes_to_check = testnet_nodes
42+
print(f"🔍 Testnet context detected - checking {len(nodes_to_check)} testnet nodes")
43+
else:
44+
# Mixed context or no files - check all
45+
nodes_to_check = testnet_nodes + mainnet_nodes
46+
print(f"🔍 Mixed context detected - checking all {len(nodes_to_check)} nodes")
3047

3148
# Read each individual node error file
32-
for node_name in all_nodes:
49+
for node_name in nodes_to_check:
3350
node_file = os.path.join(ERROR_DIR, f"errors_{node_name.replace(' ', '_')}.json")
3451
if os.path.exists(node_file):
3552
try:
3653
with open(node_file, 'r') as f:
3754
node_errors = json.load(f)
55+
print(f"📄 Found error file for {node_name}: {len(node_errors)} errors")
3856
if node_errors: # Only add if there are errors
3957
aggregated_errors[node_name] = node_errors
4058
except Exception as e:
4159
print(f"⚠️ Warning: Could not read {node_file}: {e}")
60+
else:
61+
print(f"📄 No error file found for {node_name}")
4262

4363
# Write aggregated file
4464
aggregated_file = os.path.join(ERROR_DIR, "error_stats.json")
4565
with open(aggregated_file, 'w') as f:
4666
json.dump(aggregated_errors, f, indent=2)
4767

68+
print(f"📊 Aggregated {len(aggregated_errors)} nodes with errors")
4869
return aggregated_errors
4970

5071
def get_all_errors_for_node(node_name):
@@ -94,7 +115,7 @@ def print_all_errors():
94115
# Create aggregated error file from individual files
95116
aggregated_errors = create_aggregated_error_file()
96117

97-
# Define all possible nodes for both testnet and mainnet
118+
# Determine which nodes to show based on context
98119
testnet_nodes = [
99120
"Node 01", "Node 04", "Node 05", "Node 06", "Node 07", "Node 08",
100121
"Node 09", "Node 10", "Node 13", "Node 14", "Node 21", "Node 23", "Node 37"
@@ -104,14 +125,27 @@ def print_all_errors():
104125
"Node 25", "Node 26", "Node 27", "Node 28", "Node 29", "Node 30"
105126
]
106127

107-
all_nodes = testnet_nodes + mainnet_nodes
128+
# Check which type of nodes have error files to determine context
129+
testnet_files_exist = any(os.path.exists(os.path.join(ERROR_DIR, f"errors_{node.replace(' ', '_')}.json")) for node in testnet_nodes)
130+
mainnet_files_exist = any(os.path.exists(os.path.join(ERROR_DIR, f"errors_{node.replace(' ', '_')}.json")) for node in mainnet_nodes)
131+
132+
# Determine which nodes to process
133+
if mainnet_files_exist and not testnet_files_exist:
134+
# Mainnet context
135+
nodes_to_show = mainnet_nodes
136+
elif testnet_files_exist and not mainnet_files_exist:
137+
# Testnet context
138+
nodes_to_show = testnet_nodes
139+
else:
140+
# Mixed context or no files - check all
141+
nodes_to_show = testnet_nodes + mainnet_nodes
108142

109143
# Get nodes that have errors
110144
nodes_with_errors = list(aggregated_errors.keys())
111145

112146
# If no aggregated errors, check individual files
113147
if not nodes_with_errors:
114-
for node_name in all_nodes:
148+
for node_name in nodes_to_show:
115149
node_file = os.path.join(ERROR_DIR, f"errors_{node_name.replace(' ', '_')}.json")
116150
if os.path.exists(node_file):
117151
try:
@@ -124,10 +158,10 @@ def print_all_errors():
124158

125159
# If still no nodes with errors, check all nodes anyway
126160
if not nodes_with_errors:
127-
nodes_with_errors = all_nodes
161+
nodes_with_errors = nodes_to_show
128162

129163
# Process each node
130-
for node_name in nodes_with_errors:
164+
for node_name in nodes_to_show:
131165
errors = get_all_errors_for_node(node_name)
132166

133167
if errors:
@@ -155,8 +189,44 @@ def print_error_for_node():
155189
print(f" • {count}x {error_key}")
156190
print()
157191

192+
def debug_error_files():
    """Print a diagnostic listing of the JSON error files in ERROR_DIR.

    For each ``*.json`` file whose name contains ``error`` (case-insensitive),
    prints the entry count and a per-key summary. Purely a debug aid: it only
    reads files and writes to stdout; it returns None and raises nothing
    (per-file read/parse failures are reported and skipped).
    """
    print("\n🔍 Debug: Checking error files in test_output directory:")

    # Nothing to do if the output directory was never created.
    if not os.path.exists(ERROR_DIR):
        print(f"❌ Directory {ERROR_DIR} does not exist")
        return

    error_files = [f for f in os.listdir(ERROR_DIR) if f.endswith('.json') and 'error' in f.lower()]

    if not error_files:
        print("❌ No error files found")
        return

    print(f"📁 Found {len(error_files)} error files:")
    for filename in sorted(error_files):
        file_path = os.path.join(ERROR_DIR, filename)
        try:
            with open(file_path, 'r') as f:
                data = json.load(f)
            if isinstance(data, dict):
                if data:  # Not empty
                    # Fixed: interpolate the actual filename instead of the
                    # literal placeholder "(unknown)" that appeared here.
                    print(f"   📄 {filename}: {len(data)} entries")
                    for key, value in data.items():
                        if isinstance(value, dict):
                            print(f"      - {key}: {len(value)} errors")
                        else:
                            print(f"      - {key}: {value}")
                else:
                    print(f"   📄 {filename}: Empty")
            else:
                print(f"   📄 {filename}: Not a dict ({type(data)})")
        except Exception as e:
            # Best-effort diagnostics: report the unreadable file and continue.
            print(f"   ❌ {filename}: Error reading - {e}")
226+
158227
if __name__ == "__main__":
    # AGGREGATE_MODE=true switches from the single-node report to the
    # cross-node aggregated report (with a debug dump of the raw files).
    aggregate_mode = os.getenv("AGGREGATE_MODE") == "true"
    if not aggregate_mode:
        print_error_for_node()
    else:
        debug_error_files()  # Add debug output
        print_all_errors()

0 commit comments

Comments
 (0)