Commit a7b30fc

Make run.py configurable with config file
1 parent 63575b7 commit a7b30fc

File tree

1 file changed: +36, -18 lines

llm-complete-guide/run.py

Lines changed: 36 additions & 18 deletions
@@ -147,6 +147,12 @@
     default=False,
     help="Generate chunks for Hugging Face dataset",
 )
+@click.option(
+    "--config",
+    "config",
+    default=None,
+    help="Path to config file",
+)
 def main(
     rag: bool = False,
     deploy: bool = False,
@@ -159,6 +165,7 @@ def main(
     argilla: bool = False,
     reranked: bool = False,
     chunks: bool = False,
+    config: str = None,
 ):
     """Main entry point for the pipeline execution.

@@ -170,11 +177,11 @@ def main(
         model (str): The model to use for the completion. Default is OPENAI_MODEL.
         no_cache (bool): If `True`, cache will be disabled.
         synthetic (bool): If `True`, the synthetic data pipeline will be run.
-        local (bool): If `True`, the local LLM via Ollama will be used.
         embeddings (bool): If `True`, the embeddings will be fine-tuned.
         argilla (bool): If `True`, the Argilla annotations will be used.
         chunks (bool): If `True`, the chunks pipeline will be run.
         reranked (bool): If `True`, rerankers will be used
+        config (str): Path to a config file.
     """
     pipeline_args = {"enable_cache": not no_cache}
     embeddings_finetune_args = {
@@ -196,13 +203,21 @@ def main(
         md = Markdown(response)
         console.print(md)

-    print(f"Running Pipeline with pipeline args: {pipeline_args}")
-    if rag:
+    config_path = None
+    if config:
         config_path = os.path.join(
             os.path.dirname(os.path.realpath(__file__)),
             "configs",
-            "rag_local_dev.yaml",
+            config,
         )
+
+    if rag:
+        if not config_path:
+            config_path = os.path.join(
+                os.path.dirname(os.path.realpath(__file__)),
+                "configs",
+                "rag_local_dev.yaml",
+            )
         llm_basic_rag.with_options(config_path=config_path, **pipeline_args)()
     if deploy:
         rag_deployment.with_options(
@@ -211,28 +226,31 @@ def main(
     if deploy:
         rag_deployment.with_options(**pipeline_args)()
     if evaluation:
-        config_path = os.path.join(
-            os.path.dirname(os.path.realpath(__file__)),
-            "configs",
-            "rag_eval.yaml",
-        )
+        if not config_path:
+            config_path = os.path.join(
+                os.path.dirname(os.path.realpath(__file__)),
+                "configs",
+                "rag_eval.yaml",
+            )
         pipeline_args["enable_cache"] = False
         llm_eval.with_options(config_path=config_path)()
     if synthetic:
-        config_path = os.path.join(
-            os.path.dirname(os.path.realpath(__file__)),
-            "configs",
-            "synthetic.yaml",
-        )
+        if not config_path:
+            config_path = os.path.join(
+                os.path.dirname(os.path.realpath(__file__)),
+                "configs",
+                "synthetic.yaml",
+            )
         generate_synthetic_data.with_options(
             config_path=config_path, **pipeline_args
         )()
     if embeddings:
-        config_path = os.path.join(
-            os.path.dirname(os.path.realpath(__file__)),
-            "configs",
-            "embeddings.yaml",
-        )
+        if not config_path:
+            config_path = os.path.join(
+                os.path.dirname(os.path.realpath(__file__)),
+                "configs",
+                "embeddings.yaml",
+            )
         finetune_embeddings.with_options(
             config_path=config_path, **embeddings_finetune_args
         )()
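Taken together, the change resolves the config path once from the new --config flag and lets each pipeline fall back to its own default YAML only when no flag was given. A minimal standalone sketch of that resolve-once / fallback pattern, assuming the configs/ layout shown in the diff (the helper name resolve_config_path and the PIPELINE_DEFAULTS mapping are illustrative, not part of the commit):

import os
from typing import Optional

# Default YAML per pipeline, mirroring the filenames referenced in the diff.
PIPELINE_DEFAULTS = {
    "rag": "rag_local_dev.yaml",
    "evaluation": "rag_eval.yaml",
    "synthetic": "synthetic.yaml",
    "embeddings": "embeddings.yaml",
}

def resolve_config_path(config: Optional[str], pipeline: str) -> str:
    # Prefer the user-supplied --config filename; otherwise use the
    # pipeline-specific default. Both resolve relative to the configs/
    # directory next to this script.
    filename = config if config else PIPELINE_DEFAULTS[pipeline]
    return os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "configs",
        filename,
    )

With this in place, `python run.py --rag --config my_experiment.yaml` would read configs/my_experiment.yaml, while a plain `python run.py --rag` keeps the previous configs/rag_local_dev.yaml behavior (my_experiment.yaml is a hypothetical filename).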
