Commit 282cdb8 ("lint")
Parent: 70964fc

23 files changed: +74, -57 lines

eval/chat_benchmarks/HumanEval/eval_instruct.py
Lines changed: 3 additions & 1 deletion

@@ -112,7 +112,9 @@ def generate_responses(self, model: LM) -> Dict[str, Any]:
         self.logger.info("Generating responses for Human Eval...")
         outputs = self.compute(model, all_instances)

-        is_main_process = model.accelerator.process_index == 0 if hasattr(model, 'accelerator') else model.world_size <= 1
+        is_main_process = (
+            model.accelerator.process_index == 0 if hasattr(model, "accelerator") else model.world_size <= 1
+        )
         if not is_main_process:
             continue
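
The same one-line conditional is reformatted in the three files below. As a standalone reference, here is a minimal sketch of the gating idiom these hunks touch; the helper function is hypothetical, but the attribute checks mirror the diff, which prefers an HF Accelerate accelerator when the model object carries one and otherwise falls back to world_size.

    # Minimal sketch (not the repository's API): decide whether the current
    # rank should post-process results. Prefer accelerator.process_index when
    # an Accelerate object is attached; otherwise treat single-process runs
    # (world_size <= 1) as the main process.
    def is_main_process(model) -> bool:
        if hasattr(model, "accelerator"):
            return model.accelerator.process_index == 0
        return model.world_size <= 1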

eval/chat_benchmarks/IFEval/eval_instruct.py
Lines changed: 3 additions & 1 deletion

@@ -115,7 +115,9 @@ def generate_responses(self, model: LM) -> Dict[str, Any]:
         self.logger.info("Generating responses...")
         outputs = self.compute(model, all_instances)

-        is_main_process = model.accelerator.process_index == 0 if hasattr(model, 'accelerator') else model.world_size <= 1
+        is_main_process = (
+            model.accelerator.process_index == 0 if hasattr(model, "accelerator") else model.world_size <= 1
+        )
         if not is_main_process:
             return None

eval/chat_benchmarks/MBPP/eval_instruct.py
Lines changed: 3 additions & 1 deletion

@@ -161,7 +161,9 @@ def generate_responses(self, model: LM) -> Dict[str, Any]:
         outputs = self.compute(model, all_instances)

         # Return None early for non-primary ranks
-        is_main_process = model.accelerator.process_index == 0 if hasattr(model, 'accelerator') else model.world_size <= 1
+        is_main_process = (
+            model.accelerator.process_index == 0 if hasattr(model, "accelerator") else model.world_size <= 1
+        )
         if not is_main_process:
             return None

eval/chat_benchmarks/MTBench/eval_instruct.py
Lines changed: 3 additions & 1 deletion

@@ -151,7 +151,9 @@ def get_model_answers(self, model: LM, model_id: str, questions: List[Dict[str,
         all_convs[q_idx].append({"role": "assistant", "content": output})
         all_choices[q_idx]["turns"].append(output)

-        is_main_process = model.accelerator.process_index == 0 if hasattr(model, 'accelerator') else model.world_size <= 1
+        is_main_process = (
+            model.accelerator.process_index == 0 if hasattr(model, "accelerator") else model.world_size <= 1
+        )
         if not is_main_process:
             continue

eval/chat_benchmarks/MTBench/fastchat/data/clean_sharegpt.py
Lines changed: 3 additions & 6 deletions

@@ -5,6 +5,7 @@
 Usage:
 python3 -m fastchat.data.clean_sharegpt --in sharegpt_html.json --out sharegpt_clean.json
 """
+
 import argparse
 from concurrent.futures import ProcessPoolExecutor
 import json

@@ -19,9 +20,7 @@

 div_pattern = re.compile("<div.*?>")
 span_pattern = re.compile("<span.*?>")
-code_lang_pattern = re.compile(
-    "```\s*" + "(.*?)" + "(?:Copy code)+" + "(.+?)" + "\s*?```", re.DOTALL
-)
+code_lang_pattern = re.compile("```\s*" + "(.*?)" + "(?:Copy code)+" + "(.+?)" + "\s*?```", re.DOTALL)
 code_lang_format = "```\g<1>\n\g<2>\n```"
 regenerate_pattern = re.compile("\d+ / \d+")
 copy_chars_pattern = re.compile("Copy\d+ chars / \d+ words")

@@ -155,9 +154,7 @@ def clean_html_all(content, begin, end):
     content = content[begin:end]
     processed = []
     with ProcessPoolExecutor() as executor:
-        for result in tqdm(
-            executor.map(clean_html_one_sample, content), total=len(content)
-        ):
+        for result in tqdm(executor.map(clean_html_one_sample, content), total=len(content)):
             processed.append(result)

     visited = {}
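
As a quick illustration of the collapsed regex pair: it strips the "Copy code" button text that the scraped ShareGPT HTML leaves inside fenced blocks and re-inserts proper newlines. The sample string below is hypothetical, and raw strings are used for brevity; the expressions are otherwise the same as in the hunk.

    import re

    # Same pattern/format pair as in the hunk above, written as raw strings.
    code_lang_pattern = re.compile(r"```\s*(.*?)(?:Copy code)+(.+?)\s*?```", re.DOTALL)
    code_lang_format = r"```\g<1>\n\g<2>\n```"

    sample = "Try this: ```pythonCopy codeprint('hi')``` and run it."  # hypothetical input
    print(re.sub(code_lang_pattern, code_lang_format, sample))
    # Prints: Try this: ```python
    #         print('hi')
    #         ``` and run it.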

eval/chat_benchmarks/MTBench/fastchat/data/extract_gpt4_only.py
Lines changed: 1 addition & 0 deletions

@@ -3,6 +3,7 @@

 Usage: python3 -m fastchat.data.extract_gpt4_only --in sharegpt.json
 """
+
 import argparse
 import json

eval/chat_benchmarks/MTBench/fastchat/data/extract_single_round.py
Lines changed: 1 addition & 0 deletions

@@ -3,6 +3,7 @@

 Usage: python3 -m fastchat.data.extract_single_round --in sharegpt.json
 """
+
 import argparse
 import json

eval/chat_benchmarks/MTBench/fastchat/data/filter_wrong_format.py
Lines changed: 1 addition & 0 deletions

@@ -5,6 +5,7 @@
 python3 -m fastchat.data.filter_wrong_format --in input.json --out output.json

 """
+
 import argparse
 import json
 import re

eval/chat_benchmarks/MTBench/fastchat/data/get_stats.py
Lines changed: 2 additions & 6 deletions

@@ -26,9 +26,7 @@ def tokenize_one_sample(c):
 def tokenize_dataset(content):
     processed = []
     with ProcessPoolExecutor() as executor:
-        for result in tqdm(
-            executor.map(tokenize_one_sample, content), total=len(content)
-        ):
+        for result in tqdm(executor.map(tokenize_one_sample, content), total=len(content)):
             processed.append(result)

     return processed

@@ -59,9 +57,7 @@ def compute_stats(content):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--in-file", type=str)
-    parser.add_argument(
-        "--model-name-or-path", type=str, default="meta-llama/Llama-2-7b-chat-hf"
-    )
+    parser.add_argument("--model-name-or-path", type=str, default="meta-llama/Llama-2-7b-chat-hf")
     args = parser.parse_args()

     content = json.load(open(args.in_file, "r"))
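
One note on the collapsed loops here and in clean_sharegpt.py above: ProcessPoolExecutor.map returns a lazy iterator with no length, so the explicit total=len(content) is what lets tqdm render a real progress bar. A self-contained sketch of the idiom, with illustrative names rather than the module's own:

    from concurrent.futures import ProcessPoolExecutor
    from tqdm import tqdm

    def map_with_progress(fn, items):
        # executor.map yields results lazily and in input order; tqdm needs an
        # explicit total because the returned iterator has no __len__.
        results = []
        with ProcessPoolExecutor() as executor:
            for result in tqdm(executor.map(fn, items), total=len(items)):
                results.append(result)
        return results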

eval/chat_benchmarks/MTBench/fastchat/data/hardcoded_questions.py
Lines changed: 1 addition & 0 deletions

@@ -1,6 +1,7 @@
 """
 Hardcoded question and answers.
 """
+
 import json

