Commit c05963d

fix: fix lint problems
1 parent: 3360801

3 files changed, +12 -11 lines changed

graphgen/models/evaluate/reward_evaluator.py

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ class RewardEvaluator:
     results: list[float] = None
 
     def __post_init__(self):
+        import torch
         self.num_gpus = torch.cuda.device_count()
 
     @staticmethod
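
The one-line fix defers the torch import to instance initialization. A minimal sketch of the resulting pattern; field names beyond those visible in the hunk are assumptions, not the repository's actual code:

from dataclasses import dataclass, field

@dataclass
class RewardEvaluator:
    # Only `results` appears in the hunk; `num_gpus` as a declared field is
    # assumed here to keep the sketch self-contained.
    results: list[float] = None
    num_gpus: int = field(init=False, default=0)

    def __post_init__(self):
        # Deferred import: torch is only required once an instance is created,
        # so importing this module stays cheap and does not touch CUDA at
        # import time.
        import torch

        self.num_gpus = torch.cuda.device_count()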

graphgen/version.py

Lines changed: 1 addition & 1 deletion
@@ -25,4 +25,4 @@ def parse_version_info(version_str: str) -> Tuple:
     return tuple(_version_info)
 
 
-version_info = parse_version_info(__version__)
+version_info = parse_version_info(__version__)

(The removed and added lines are textually identical here, so the lint fix is presumably whitespace-only, for example a missing trailing newline.)

webui/app.py

Lines changed: 10 additions & 10 deletions
@@ -57,7 +57,7 @@ def init_graph_gen(config: dict, env: dict) -> GraphGen:
 
     return graph_gen
 
-
+# pylint: disable=too-many-statements
 def run_graphgen(*arguments: list, progress=gr.Progress()):
     # Unpack arguments
     config = {
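
Here a blank line above run_graphgen is replaced with a pylint pragma, presumably because the function is long enough to trip the too-many-statements check. As a general pylint note rather than anything specific to this repo, a disable comment on its own line switches the named check off from that point to the end of the enclosing block:

# pylint: disable=too-many-statements
def long_handler():
    # Many sequential statements here would normally trigger the
    # too-many-statements refactoring message; the pragma above silences it.
    step = 1
    return step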
@@ -241,7 +241,7 @@ def run_graphgen(*arguments: list, progress=gr.Progress()):
                 value="Qwen/Qwen2.5-7B-Instruct",
                 info=_("Trainee Model Info"),
                 interactive=True,
-                visible=(if_trainee_model.value == True))
+                visible=if_trainee_model.value is True)
 
         with gr.Accordion(label=_("Generation Config"), open=False):
             chunk_size = gr.Slider(label="Chunk Size",
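
This hunk, and the remaining hunks in this file, make the same substitution: `visible=(x == True)` becomes `visible=x is True`, which satisfies pylint's singleton-comparison check. A short standalone illustration of the spellings (general Python, not project code):

flag = True

visible = flag == True   # flagged by pylint: equality comparison against the True singleton
visible = flag is True   # what this commit uses: identity test against True
visible = bool(flag)     # coerces truthiness to a bool; plain `flag` also works when it is already a bool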
@@ -261,7 +261,7 @@ def run_graphgen(*arguments: list, progress=gr.Progress()):
                 value=2,
                 minimum=1,
                 interactive=True,
-                visible=(if_trainee_model.value == True))
+                visible=if_trainee_model.value is True)
             bidirectional = gr.Checkbox(label="Bidirectional",
                 value=True,
                 interactive=True)
@@ -277,7 +277,7 @@ def run_graphgen(*arguments: list, progress=gr.Progress()):
                 label="Max Extra Edges",
                 step=1,
                 interactive=True,
-                visible=(expand_method.value == "max_width"))
+                visible=expand_method.value == "max_width")
             max_tokens = gr.Slider(minimum=64,
                 maximum=1024,
                 value=256,
@@ -298,7 +298,7 @@ def run_graphgen(*arguments: list, progress=gr.Progress()):
                 label="Edge Sampling",
                 value="max_loss",
                 interactive=True,
-                visible=(if_trainee_model.value == True))
+                visible=if_trainee_model.value is True)
             isolated_node_strategy = gr.Radio(choices=["add", "ignore"],
                 label="Isolated Node Strategy",
                 value="ignore",
@@ -355,15 +355,15 @@ def run_graphgen(*arguments: list, progress=gr.Progress()):
                 outputs=[])
 
         expand_method.change(lambda method:
-                (gr.update(visible=(method == "max_width")),
-                 gr.update(visible=(method != "max_width"))),
+                (gr.update(visible=method == "max_width"),
+                 gr.update(visible=method != "max_width")),
                 inputs=expand_method,
                 outputs=[max_extra_edges, max_tokens])
 
         if_trainee_model.change(
-            lambda use_trainee: (gr.update(visible=(use_trainee == True)),
-                                 gr.update(visible=(use_trainee == True)),
-                                 gr.update(visible=(use_trainee == True))),
+            lambda use_trainee: (gr.update(visible=use_trainee is True),
+                                 gr.update(visible=use_trainee is True),
+                                 gr.update(visible=use_trainee is True)),
             inputs=if_trainee_model,
             outputs=[trainee_model, quiz_samples, edge_sampling])
 
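
The last two hunks only reshape the boolean expressions passed to gr.update; the show/hide wiring itself is unchanged. For context, a minimal self-contained sketch of that wiring, assuming Gradio's Blocks/Checkbox/Textbox components and the gr.update keyword API; the labels and layout are placeholders, not the app's actual UI:

import gradio as gr

with gr.Blocks() as demo:
    if_trainee_model = gr.Checkbox(label="Use Trainee Model", value=False)
    trainee_model = gr.Textbox(label="Trainee Model", visible=False)

    # When the checkbox changes, return an update that toggles the textbox's
    # visibility to match the checkbox state.
    if_trainee_model.change(
        lambda use_trainee: gr.update(visible=use_trainee is True),
        inputs=if_trainee_model,
        outputs=trainee_model,
    )

# demo.launch()  # uncomment to run the demo locally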
