Skip to content

Commit 86f6276

Browse files
committed
Update dataset name from htahir1 to zenml namespace in configuration files
1 parent aecfe12 commit 86f6276

File tree

3 files changed: +6 −6 lines changed

llm-finetuning/configs/finetune_aws.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ settings:
66
model:
77
name: "peft-lora-zencoder15B-personal-copilot"
88
description: "Fine-tuned `starcoder15B-personal-copilot-A100-40GB-colab` for ZenML pipelines."
9-
audience: "Data Scientists / ML Engineers"
9+
audience: "Data Scientists / ML Engineers"
1010
use_cases: "Code Generation for ZenML MLOps pipelines."
1111
limitations: "There is no guarantee that this model will work for your use case. Please test it thoroughly before using it in production."
1212
trade_offs: "This model is optimized for ZenML pipelines. It is not optimized for other libraries."
@@ -23,13 +23,13 @@ steps:
2323
step_operator: sagemaker-eu
2424
settings:
2525
step_operator.sagemaker:
26-
estimator_args:
26+
estimator_args:
2727
instance_type: "ml.p4d.24xlarge"
2828

2929
parameters:
3030
args:
3131
model_path: "bigcode/starcoder"
32-
dataset_name: "htahir1/zenml-codegen-v1"
32+
dataset_name: "zenml/zenml-codegen-v1"
3333
subset: "data"
3434
data_column: "content"
3535
split: "train"

llm-finetuning/configs/finetune_gcp.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ settings:
66
model:
77
name: "peft-lora-zencoder15B-personal-copilot"
88
description: "Fine-tuned `starcoder15B-personal-copilot-A100-40GB-colab` for ZenML pipelines."
9-
audience: "Data Scientists / ML Engineers"
9+
audience: "Data Scientists / ML Engineers"
1010
use_cases: "Code Generation for ZenML MLOps pipelines."
1111
limitations: "There is no guarantee that this model will work for your use case. Please test it thoroughly before using it in production."
1212
trade_offs: "This model is optimized for ZenML pipelines. It is not optimized for other libraries."
@@ -29,7 +29,7 @@ steps:
2929
parameters:
3030
args:
3131
model_path: "bigcode/starcoder"
32-
dataset_name: "htahir1/zenml-codegen-v1"
32+
dataset_name: "zenml/zenml-codegen-v1"
3333
subset: "data"
3434
data_column: "content"
3535
split: "train"

llm-finetuning/configs/generate_code_dataset.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ settings:
1010

1111
# pipeline configuration
1212
parameters:
13-
dataset_id: htahir1/zenml-codegen-v1
13+
dataset_id: zenml/zenml-codegen-v1
1414

1515
steps:
1616
mirror_repositories:

0 commit comments

Comments (0)