
Commit 7cc18be

Author: Sunil Thaha (committed)

chore: run hatch fmt to format to 120 width

Signed-off-by: Sunil Thaha <[email protected]>

1 parent: 16ef378

Note: GitHub hides some content of large commits by default, so only a subset of the 75 changed files is shown below.

75 files changed (+805, -545 lines)
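In current versions of Hatch, hatch fmt delegates to Ruff, so the 120-column width named in the commit message is typically declared once in the project's pyproject.toml. A minimal sketch of such a configuration (illustrative only; the repository's actual settings are not part of this diff):

# pyproject.toml -- illustrative sketch, not the repo's verified config
[tool.ruff]
line-length = 120

Because hatch fmt by default also applies Ruff's auto-fixable lint rules, the diff below mixes pure line re-wrapping with import sorting and small mechanical code rewrites.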

cmd/main.py

Lines changed: 1 addition & 0 deletions

@@ -3,6 +3,7 @@
 # -*- coding: utf-8 -*-
 import re
 import sys
+
 from kepler_model.cmd.main import run

 if __name__ == "__main__":

model_training/s3/src/s3/loader.py

Lines changed: 1 addition & 0 deletions

@@ -4,6 +4,7 @@
 # <provider>_upload(client, mnt_path)
 import argparse
 import os
+
 from . import util

 model_dir = "models"

model_training/s3/src/s3/pusher.py

Lines changed: 2 additions & 1 deletion

@@ -2,8 +2,9 @@
 # client = new_<provider>_client(args)
 ## upload all files in mnt path
 # <provider>_upload(client, mnt_path)
-import os
 import argparse
+import os
+
 from . import util

 model_dir = "models"

model_training/s3/src/s3/util.py

Lines changed: 1 addition & 0 deletions

@@ -1,4 +1,5 @@
 import argparse
+
 import s3.__about__ as about

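The hunks above all show the same import-layout fix: imports are alphabetized and a blank line now separates standard-library imports from first-party imports such as "from . import util". This follows the isort convention that Ruff enforces; assuming a standard Ruff setup, the same fix can be reproduced in isolation with:

ruff check --select I --fix .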

src/kepler_model/cmd/cmd_plot.py

Lines changed: 21 additions & 21 deletions

@@ -1,11 +1,11 @@
 import os
-from kepler_model.util.prom_types import TIMESTAMP_COL
-from kepler_model.util import PowerSourceMap

-from kepler_model.util.train_types import FeatureGroup, ModelOutputType, weight_support_trainers
-from kepler_model.util.loader import load_metadata, load_scaler, get_model_group_path
-from kepler_model.train.profiler.node_type_index import NodeTypeIndexCollection
 from kepler_model.estimate import load_model
+from kepler_model.train.profiler.node_type_index import NodeTypeIndexCollection
+from kepler_model.util import PowerSourceMap
+from kepler_model.util.loader import get_model_group_path, load_metadata, load_scaler
+from kepler_model.util.prom_types import TIMESTAMP_COL
+from kepler_model.util.train_types import FeatureGroup, ModelOutputType, weight_support_trainers

 markers = ["o", "s", "^", "v", "<", ">", "p", "P", "*", "x", "+", "|", "_"]

@@ -18,14 +18,14 @@ def ts_plot(data, cols, title, output_folder, name, labels=None, subtitles=None,

     sns.set(font_scale=1.2)
     fig, axes = plt.subplots(len(cols), 1, figsize=(plot_width, len(cols) * plot_height))
-    for i in range(0, len(cols)):
+    for i in range(len(cols)):
         if len(cols) == 1:
             ax = axes
         else:
             ax = axes[i]
         if isinstance(cols[i], list):
             # multiple lines
-            for j in range(0, len(cols[i])):
+            for j in range(len(cols[i])):
                 sns.lineplot(data=data, x=TIMESTAMP_COL, y=cols[i][j], ax=ax, label=labels[j])
             ax.set_title(subtitles[i])
         else:

@@ -52,27 +52,26 @@ def feature_power_plot(data, model_id, output_type, energy_source, feature_cols,
     col_num = len(actual_power_cols)
     width = max(10, col_num * plot_width)
     fig, axes = plt.subplots(row_num, col_num, figsize=(width, row_num * plot_height))
-    for xi in range(0, row_num):
+    for xi in range(row_num):
         feature_col = feature_cols[xi]
-        for yi in range(0, col_num):
+        for yi in range(col_num):
             if row_num == 1:
                 if col_num == 1:
                     ax = axes
                 else:
                     ax = axes[yi]
+            elif col_num == 1:
+                ax = axes[xi]
             else:
-                if col_num == 1:
-                    ax = axes[xi]
-                else:
-                    ax = axes[xi][yi]
+                ax = axes[xi][yi]
             sorted_data = data.sort_values(by=[feature_col])
             sns.scatterplot(data=sorted_data, x=feature_col, y=actual_power_cols[yi], ax=ax, label="actual")
             sns.lineplot(data=sorted_data, x=feature_col, y=predicted_power_cols[yi], ax=ax, label="predicted", color="C1")
             if xi == 0:
                 ax.set_title(actual_power_cols[yi])
             if yi == 0:
                 ax.set_ylabel("Power (W)")
-    title = "{} {} prediction correlation \n by {}".format(energy_source, output_type, model_id)
+    title = f"{energy_source} {output_type} prediction correlation \n by {model_id}"
     plt.suptitle(title, x=0.5, y=0.99)
     plt.tight_layout()
     filename = os.path.join(output_folder, name + ".png")

@@ -96,7 +95,7 @@ def summary_plot(args, energy_source, summary_df, output_folder, name):
     energy_components = PowerSourceMap[energy_source]
     col_num = len(energy_components)
     fig, axes = plt.subplots(col_num, 1, figsize=(plot_width, plot_height * col_num))
-    for i in range(0, col_num):
+    for i in range(col_num):
         component = energy_components[i]
         data = summary_df[(summary_df["energy_source"] == energy_source) & (summary_df["energy_component"] == component)]
         data = data.sort_values(by=["Feature Group", "MAE"])

@@ -111,7 +110,7 @@ def summary_plot(args, energy_source, summary_df, output_folder, name):
         if i < col_num - 1:
             ax.set_xlabel("")
         ax.legend(bbox_to_anchor=(1.05, 1.05))
-    plt.suptitle("{} {} error".format(energy_source, args.output_type))
+    plt.suptitle(f"{energy_source} {args.output_type} error")
     plt.tight_layout()
     filename = os.path.join(output_folder, name + ".png")
     fig.savefig(filename)

@@ -134,7 +133,7 @@ def metadata_plot(args, energy_source, metadata_df, output_folder, name):
     energy_components = PowerSourceMap[energy_source]
     col_num = len(energy_components)
     fig, axes = plt.subplots(col_num, 1, figsize=(plot_width, plot_height * col_num))
-    for i in range(0, col_num):
+    for i in range(col_num):
         component = energy_components[i]
         metadata_df = metadata_df.sort_values(by="feature_group")
         if col_num == 1:

@@ -149,7 +148,7 @@ def metadata_plot(args, energy_source, metadata_df, output_folder, name):
         if i < col_num - 1:
             ax.set_xlabel("")
         # ax.legend(bbox_to_anchor=(1.05, 1.05))
-    plt.suptitle("Pipieline metadata of {} {}".format(energy_source.upper(), args.output_type))
+    plt.suptitle(f"Pipieline metadata of {energy_source.upper()} {args.output_type}")
     plt.tight_layout()
     plt.legend(frameon=False)
     filename = os.path.join(output_folder, name + ".png")

@@ -174,7 +173,7 @@ def power_curve_plot(args, data_path, energy_source, output_folder, name):

 def _get_model(model_toppath, trainer, model_node_type, output_type, name, energy_source):
     feature_group = FeatureGroup.BPFOnly
-    model_name = "{}_{}".format(trainer, model_node_type)
+    model_name = f"{trainer}_{model_node_type}"
     group_path = get_model_group_path(model_toppath, output_type, feature_group, energy_source, name)
     model_path = os.path.join(group_path, model_name)
     model = load_model(model_path)

@@ -204,11 +203,12 @@ def _load_all_models(model_toppath, output_type, name, node_types, energy_source


 def _plot_models(models, cpu_ms_max, energy_source, output_folder, name, max_plot=15, cpu_time_bin_num=10, sample_num=20):
-    from kepler_model.util.train_types import BPF_FEATURES
     import numpy as np
     import pandas as pd
     import seaborn as sns

+    from kepler_model.util.train_types import BPF_FEATURES
+
     sns.set_palette("Paired")

     import matplotlib.pyplot as plt

@@ -253,7 +253,7 @@ def _plot_models(models, cpu_ms_max, energy_source, output_folder, name, max_plo
             ax = axes[axes_index // num_cols][axes_index % num_cols]
             node_type = data_with_prediction_index[0]
             data_with_prediction = data_with_prediction_index[1]
-            sns.lineplot(data=data_with_prediction, x=main_feature_col, y=predicted_col[energy_source], label="type={}".format(node_type), marker=markers[index], ax=ax)
+            sns.lineplot(data=data_with_prediction, x=main_feature_col, y=predicted_col[energy_source], label=f"type={node_type}", marker=markers[index], ax=ax)
             index += 1
             index = index % len(markers)
             if index % max_plot == 0:
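Beyond re-wrapping, the cmd_plot.py hunks apply three mechanical rewrites: range(0, n) becomes range(n), str.format() calls become f-strings, and a nested else/if collapses into elif. Each is behavior-preserving, as this standalone Python snippet illustrates (not project code; the names are hypothetical):

trainer, node_type = "xgboost", 4
assert list(range(0, 5)) == list(range(5))
assert "{}_{}".format(trainer, node_type) == f"{trainer}_{node_type}"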

src/kepler_model/cmd/cmd_util.py

Lines changed: 30 additions & 19 deletions

@@ -1,25 +1,30 @@
-import os
 import datetime
-import pandas as pd
+import os

+import pandas as pd

-from kepler_model.util.prom_types import node_info_column, prom_responses_to_results, SOURCE_COL, energy_component_to_query
-from kepler_model.util.train_types import ModelOutputType, FeatureGroup, PowerSourceMap
-from kepler_model.util.loader import load_json, get_pipeline_path, default_node_type
+from kepler_model.util.loader import default_node_type, get_pipeline_path, load_json
+from kepler_model.util.prom_types import (
+    SOURCE_COL,
+    energy_component_to_query,
+    node_info_column,
+    prom_responses_to_results,
+)
 from kepler_model.util.saver import assure_path, save_csv
+from kepler_model.util.train_types import FeatureGroup, ModelOutputType, PowerSourceMap

 UTC_OFFSET_TIMEDELTA = datetime.datetime.utcnow() - datetime.datetime.now()


 def print_file_to_stdout(data_path, args):
     file_path = os.path.join(data_path, args.output)
     try:
-        with open(file_path, "r") as file:
+        with open(file_path) as file:
             contents = file.read()
             print(contents)
     except FileNotFoundError:
         print(f"Error: Output '{file_path}' not found.")
-    except IOError:
+    except OSError:
         print(f"Error: Unable to read output '{file_path}'.")

@@ -41,7 +46,7 @@ def extract_time(data_path, benchmark_filename):

 def save_query_results(data_path, output_filename, query_response):
     query_results = prom_responses_to_results(query_response)
-    save_path = os.path.join(data_path, "{}_csv".format(output_filename))
+    save_path = os.path.join(data_path, f"{output_filename}_csv")
     assure_path(save_path)
     for query, data in query_results.items():
         save_csv(save_path, query, data)

@@ -80,13 +85,13 @@ def summary_validation(validate_df):
         no_data_df = target_df[target_df["count"] == 0]
         zero_data_df = target_df[target_df[">0"] == 0]
         valid_df = target_df[target_df[">0"] > 0]
-        print("==== {} ====".format(metric))
+        print(f"==== {metric} ====")
         if len(no_data_df) > 0:
             print("{} pods: \tNo data for {}".format(len(no_data_df), pd.unique(no_data_df["scenarioID"])))
         if len(zero_data_df) > 0:
             print("{} pods: \tZero data for {}".format(len(zero_data_df), pd.unique(zero_data_df["scenarioID"])))

-        print("{} pods: \tValid\n".format(len(valid_df)))
+        print(f"{len(valid_df)} pods: \tValid\n")
         print("Valid data points:")
         print("Empty" if len(valid_df[">0"]) == 0 else valid_df.groupby(["scenarioID"]).sum()[[">0"]])
     for metric, query in metric_to_validate_power.items():

@@ -246,7 +251,7 @@ def check_ot_fg(args, valid_fg):
     try:
         fg = FeatureGroup[args.feature_group]
         if args.feature_group not in valid_fg_name_list:
-            print("feature group: {} is not available in your data. please choose from the following list: {}".format(args.feature_group, valid_fg_name_list))
+            print(f"feature group: {args.feature_group} is not available in your data. please choose from the following list: {valid_fg_name_list}")
             exit()
     except KeyError:
         print("invalid feature group: {}. valid feature group are {}.".format((args.feature_group, [fg.name for fg in valid_fg])))

@@ -268,14 +273,21 @@ def assert_train(trainer, data, energy_components):
     try:
         output = trainer.predict(node_type, component, X_values)
         if output is not None:
-            assert len(output) == len(X_values), "length of predicted values != features ({}!={})".format(len(output), len(X_values))
+            assert len(output) == len(X_values), f"length of predicted values != features ({len(output)}!={len(X_values)})"
     except sklearn.exceptions.NotFittedError:
         pass


 def get_isolator(data_path, isolator, profile, pipeline_name, target_hints, bg_hints, abs_pipeline_name, replace_node_type=default_node_type):
     pipeline_path = get_pipeline_path(data_path, pipeline_name=pipeline_name)
-    from kepler_model.train import MinIdleIsolator, NoneIsolator, DefaultProfiler, ProfileBackgroundIsolator, TrainIsolator, generate_profiles
+    from kepler_model.train import (
+        DefaultProfiler,
+        MinIdleIsolator,
+        NoneIsolator,
+        ProfileBackgroundIsolator,
+        TrainIsolator,
+        generate_profiles,
+    )

     supported_isolator = {
         MinIdleIsolator().get_name(): MinIdleIsolator(),

@@ -306,13 +318,12 @@ def get_isolator(data_path, isolator, profile, pipeline_name, target_hints, bg_h
         if abs_pipeline_name != "":
             trainer_isolator = TrainIsolator(idle_data=idle_data, profiler=DefaultProfiler, target_hints=target_hints, bg_hints=bg_hints, abs_pipeline_name=abs_pipeline_name)
             supported_isolator[trainer_isolator.get_name()] = trainer_isolator
-    else:
-        if abs_pipeline_name != "":
-            trainer_isolator = TrainIsolator(target_hints=target_hints, bg_hints=bg_hints, abs_pipeline_name=abs_pipeline_name)
-            supported_isolator[trainer_isolator.get_name()] = trainer_isolator
+    elif abs_pipeline_name != "":
+        trainer_isolator = TrainIsolator(target_hints=target_hints, bg_hints=bg_hints, abs_pipeline_name=abs_pipeline_name)
+        supported_isolator[trainer_isolator.get_name()] = trainer_isolator

     if isolator not in supported_isolator:
-        print("isolator {} is not supported. supported isolator: {}".format(isolator, supported_isolator.keys()))
+        print(f"isolator {isolator} is not supported. supported isolator: {supported_isolator.keys()}")
         return None
     return supported_isolator[isolator]

@@ -322,7 +333,7 @@ def get_extractor(extractor):

     supported_extractor = {DefaultExtractor().get_name(): DefaultExtractor(), SmoothExtractor().get_name(): SmoothExtractor()}
     if extractor not in supported_extractor:
-        print("extractor {} is not supported. supported extractor: {}".format(extractor, supported_extractor.keys()))
+        print(f"extractor {extractor} is not supported. supported extractor: {supported_extractor.keys()}")
         return None
     return supported_extractor[extractor]
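The exception and open() rewrites in this file are likewise no-ops at runtime: IOError has been an alias of OSError since Python 3.3, and "r" is already the default mode for open(). A standalone check (not project code):

assert IOError is OSError  # alias since Python 3.3
with open(__file__) as f:  # equivalent to open(__file__, "r")
    f.read(1)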
