Commit c446289
formating evaluation.py
1 parent 056f817 · commit c446289

supporting-blog-content/github-assistant/evaluation.py

1 file changed: 21 additions & 13 deletions
@@ -20,7 +20,9 @@
 logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
 
-parser = argparse.ArgumentParser(description="Process documents and questions for evaluation.")
+parser = argparse.ArgumentParser(
+    description="Process documents and questions for evaluation."
+)
 parser.add_argument("--num_documents",
     type=int,
     default=None,
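
For reference, the ArgumentParser change only rewraps a call that runs past the formatter's line limit; behavior is identical. Below is a minimal, runnable sketch of the flags the later hunks reference. Only --num_documents (type=int, default=None) is visible in the diff, so the remaining defaults and the store_true action are assumptions:

    import argparse

    # Flag names are taken from the hunks in this commit; defaults for the
    # last three flags and the store_true action are assumptions.
    parser = argparse.ArgumentParser(
        description="Process documents and questions for evaluation."
    )
    parser.add_argument("--num_documents", type=int, default=None)
    parser.add_argument("--skip_questions", type=int, default=0)
    parser.add_argument("--num_questions", type=int, default=None)
    parser.add_argument("--process_last_questions", action="store_true")

    args = parser.parse_args(["--skip_questions", "2", "--num_questions", "5"])
    print(args.skip_questions, args.num_questions)  # prints: 2 5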
@@ -59,7 +61,7 @@
 
 print(f"Number of documents loaded: {len(documents)}")
 
-llm = OpenAI(model="gpt-4o", request_timeout=120)
+llm = OpenAI(model="gpt-4o", request_timeout=120)
 
 data_generator = DatasetGenerator.from_documents(documents, llm=llm)
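
The `-`/`+` pair for the llm line appears to differ only in whitespace, which this view does not preserve. For context, the question-generation setup these context lines belong to looks roughly like the sketch below; the llama_index import paths vary across versions and the "docs" directory is a placeholder, so treat it as an approximation:

    from llama_index.core import SimpleDirectoryReader
    from llama_index.core.evaluation import DatasetGenerator
    from llama_index.llms.openai import OpenAI

    # Placeholder corpus path; the real script loads repository content.
    documents = SimpleDirectoryReader("docs").load_data()
    print(f"Number of documents loaded: {len(documents)}")

    llm = OpenAI(model="gpt-4o", request_timeout=120)
    data_generator = DatasetGenerator.from_documents(documents, llm=llm)
    eval_questions = data_generator.generate_questions_from_nodes()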

@@ -72,13 +74,13 @@
 eval_questions_list = [q for q in eval_questions_list if q.strip()]
 
 if args.skip_questions > 0:
-    eval_questions_list = eval_questions_list[args.skip_questions:]
+    eval_questions_list = eval_questions_list[args.skip_questions :]
 
 if args.num_questions is not None:
     if args.process_last_questions:
-        eval_questions_list = eval_questions_list[-args.num_questions:]
+        eval_questions_list = eval_questions_list[-args.num_questions :]
     else:
-        eval_questions_list = eval_questions_list[:args.num_questions]
+        eval_questions_list = eval_questions_list[: args.num_questions]
 
 print("\All available questions generated:")
 for idx, q in enumerate(eval_questions):
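
The three slice edits apply the PEP 8 rule that the formatter enforces: in a slice, the colon acts like a binary operator, so when a bound is anything more than a plain name or literal it gets a surrounding space, and the space is simply omitted on the side where the bound is omitted. A quick illustration (the names here are mine):

    items = list(range(10))
    skip = 2

    head = items[skip:]  # plain name: the tight form is kept

    class Args:
        skip_questions = 2
        num_questions = 3

    args = Args()
    # Attribute access and unary minus count as complex expressions,
    # so the occupied side of the colon gets a space:
    a = items[args.skip_questions :]
    b = items[-args.num_questions :]
    c = items[: args.num_questions]
    assert a == [2, 3, 4, 5, 6, 7, 8, 9]
    assert b == [7, 8, 9]
    assert c == [0, 1, 2]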
@@ -88,7 +90,9 @@
     for idx, q in enumerate(eval_questions_list, start=1):
         print(f"{idx}. {q}")
 except ReadTimeout as e:
-    print("Request to Ollama timed out during question generation. Please check the server or increase the timeout duration.")
+    print(
+        "Request to Ollama timed out during question generation. Please check the server or increase the timeout duration."
+    )
     traceback.print_exc()
     sys.exit(1)
 except Exception as e:
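
Note that this hunk wraps the call, not the message: the over-long string literal stays on one line, since Black-style formatters do not split string literals by default (assuming Black; see the note after the last hunk).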
@@ -150,7 +154,11 @@ def wrap_text(text, width=50):
     eval_df = pd.DataFrame([eval_data])
 
     print("\nEvaluation Result:")
-    print(tabulate(eval_df, headers="keys", tablefmt="grid", showindex=False, stralign="left"))
+    print(
+        tabulate(
+            eval_df, headers="keys", tablefmt="grid", showindex=False, stralign="left"
+        )
+    )
 
 query_engine = vector_index.as_query_engine(llm=llm)
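
The rewrapped tabulate call produces output identical to the one-liner it replaces; only the argument layout changes once the line passes the length limit. A self-contained check, with made-up DataFrame contents:

    import pandas as pd
    from tabulate import tabulate

    eval_df = pd.DataFrame([{"Query": "What does evaluation.py do?", "Score": 1.0}])

    one_liner = tabulate(eval_df, headers="keys", tablefmt="grid", showindex=False, stralign="left")
    wrapped = tabulate(
        eval_df, headers="keys", tablefmt="grid", showindex=False, stralign="left"
    )
    assert one_liner == wrapped  # reformatting source never changes the output
    print(wrapped)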

@@ -161,17 +169,17 @@ def wrap_text(text, width=50):
         eval_result_relevancy = evaluator_relevancy.evaluate_response(
             query=question, response=response_vector
         )
-        eval_result_faith = evaluator_faith.evaluate_response(
-            response=response_vector
-        )
+        eval_result_faith = evaluator_faith.evaluate_response(response=response_vector)
 
         print(f"\nProcessing Question {idx} of {total_questions}:")
-        display_eval_df(question, response_vector, eval_result_relevancy, eval_result_faith)
+        display_eval_df(
+            question, response_vector, eval_result_relevancy, eval_result_faith
+        )
     except ReadTimeout as e:
         print(f"Request to OpenAI timed out while processing question {idx}.")
         traceback.print_exc()
-        continue
+        continue
     except Exception as e:
         print(f"An error occurred while processing question {idx}: {e}")
         traceback.print_exc()
-        continue
+        continue
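
Taken together, every hunk is a pure formatting change: calls that overflow the line limit are wrapped in parentheses, a multi-line call that fits is collapsed back to one line, slice colons get PEP 8 spacing, and the two continue statements appear to be re-indented. This is consistent with Black's defaults (88-character lines), though the commit message does not name a tool; if it was Black, the same result should be reproducible with:

    python -m black supporting-blog-content/github-assistant/evaluation.py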
