Skip to content

Commit 8e6b2be

Browse files
committed
Merge commit '54659c52c7f863d0aa886ff1f12099cf1e6b8b78' into minh/s2s_context_summary
2 parents 4c10b2e + 54659c5 commit 8e6b2be

37 files changed

+4974
-6
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,7 @@ examples/fine-tuned_qa/local_cache/*
140140

141141
# PyCharm files
142142
.idea/
143+
.cursorignore
143144

144145
# VS Code files
145146
.vscode/

examples/Speech_transcription_methods.ipynb

Lines changed: 672 additions & 0 deletions
Large diffs are not rendered by default.

examples/agents_sdk/parallel_agents.ipynb

Lines changed: 340 additions & 0 deletions
Large diffs are not rendered by default.
3.1 MB
Binary file not shown.
608 KB
Binary file not shown.
322 KB
Binary file not shown.

examples/fine-tuned_qa/ft_retrieval_augmented_generation_qdrant.ipynb

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,8 @@
1111
"\n",
1212
"We will also be integrating Qdrant and Few-Shot Learning to boost the model's performance and reduce hallucinations. This could serve as a practical guide for ML practitioners, data scientists, and AI Engineers interested in leveraging the power of OpenAI models for specific use cases. 🤩\n",
1313
"\n",
14+
"Note: This notebook uses the gpt-3.5-turbo model. Fine-tuning on the SQuAD dataset with this setup yields only minimal gains for more advanced models such as gpt-4o or gpt-4.1. As such, this notebook is primarily intended as a guide for fine-tuning workflows and retrieval-augmented generation (RAG) practices.\n",
15+
"\n",
1416
"## Why should you read this blog?\n",
1517
"\n",
1618
"You want to learn how to \n",
@@ -559,7 +561,7 @@
559561
"\n",
560562
" def create_openai_file(self):\n",
561563
" self.file_object = client.files.create(\n",
562-
" file=open(self.training_file_path, \"r\"),\n",
564+
" file=open(self.training_file_path, \"rb\"),\n",
563565
" purpose=\"fine-tune\",\n",
564566
" )\n",
565567
"\n",
@@ -571,19 +573,22 @@
571573
"\n",
572574
" def create_fine_tuning_job(self):\n",
573575
" self.fine_tuning_job = client.fine_tuning.jobs.create(\n",
574-
" training_file=self.file_object[\"id\"],\n",
576+
" training_file=self.file_object.id,\n",
575577
" model=self.model_name,\n",
576578
" suffix=self.suffix,\n",
577579
" )\n",
578580
"\n",
579581
" def wait_for_fine_tuning(self, sleep_time=45):\n",
580-
" while self.fine_tuning_job.status != 'succeeded':\n",
582+
" while True:\n",
583+
" # Retrieve the latest fine-tuning job status\n",
584+
" self.fine_tuning_job = client.fine_tuning.jobs.retrieve(self.fine_tuning_job.id)\n",
585+
" print(\"Job Status:\", self.fine_tuning_job.status)\n",
586+
" if self.fine_tuning_job.status in {'succeeded', 'failed', 'cancelled'}:\n",
587+
" break\n",
581588
" time.sleep(sleep_time)\n",
582-
" self.fine_tuning_job.refresh()\n",
583-
" print(\"Job Status: \", self.fine_tuning_job.status)\n",
584589
"\n",
585590
" def retrieve_fine_tuned_model(self):\n",
586-
" self.model_id = client.fine_tuning.jobs.retrieve(self.fine_tuning_job[\"id\"]).fine_tuned_model\n",
591+
" self.model_id = client.fine_tuning.jobs.retrieve(self.fine_tuning_job.id).fine_tuned_model\n",
587592
" return self.model_id\n",
588593
"\n",
589594
" def fine_tune_model(self):\n",
15.2 KB
Loading
40.8 KB
Loading
16.7 KB
Loading

0 commit comments

Comments
 (0)