Skip to content

Commit 0e5f273

Browse files
committed
remove useless parameters from training config
1 parent b10e562 commit 0e5f273

File tree

6 files changed

+10
-12
lines changed

6 files changed

+10
-12
lines changed

helpers/model_init_scripts/init_dummy_model.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,5 +60,8 @@
6060
model.generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
6161
model.generation_config.do_sample = True # True
6262
model.generation_config.guidance_scale = 1 # 3.0
63+
64+
model.config.pad_token_id = encodec_vocab_size
65+
model.config.decoder_start_token_id = encodec_vocab_size+1
6366

6467
model.save_pretrained(os.path.join(args.save_directory, "tiny-model"))

helpers/model_init_scripts/init_model_300M.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,5 +60,8 @@
6060
model.generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
6161
model.generation_config.do_sample = True # True
6262
model.generation_config.guidance_scale = 1 # 3.0
63+
64+
model.config.pad_token_id = encodec_vocab_size
65+
model.config.decoder_start_token_id = encodec_vocab_size+1
6366

6467
model.save_pretrained(os.path.join(args.save_directory, "parler-tts-untrained-300M/"))

helpers/training_configs/librispeech_tts_r_300M_dummy.json

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -37,9 +37,6 @@
3737

3838
"preprocessing_num_workers": 8,
3939

40-
"pad_token_id": 1024,
41-
"decoder_start_token_id": 1025,
42-
4340
"do_train": true,
4441
"num_train_epochs": 50,
4542
"gradient_accumulation_steps": 1,

helpers/training_configs/starting_point_0.01.json

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -39,9 +39,6 @@
3939

4040
"preprocessing_num_workers": 8,
4141

42-
"pad_token_id": 1024,
43-
"decoder_start_token_id": 1025,
44-
4542
"do_train": true,
4643
"num_train_epochs": 40,
4744
"gradient_accumulation_steps": 1,

training/TRAINING.md

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -124,8 +124,6 @@ accelerate launch ./training/run_parler_tts_training.py \
124124
--add_audio_samples_to_wandb true \
125125
--id_column_name "id" \
126126
--preprocessing_num_workers 8 \
127-
--pad_token_id 1024 \
128-
--decoder_start_token_id 1025 \
129127
--do_train true \
130128
--num_train_epochs 50 \
131129
--gradient_accumulation_steps 1 \

training/run_parler_tts_training.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -235,11 +235,11 @@ class ModelArguments:
235235
metadata={"help": "Whether to freeze the text encoder."},
236236
)
237237
do_sample: bool = field(
238-
default=False,
238+
default=True,
239239
metadata={"help": "Whether to do sampling or greedy decoding."},
240240
)
241241
temperature: float = field(
242-
default=0.4,
242+
default=1.0,
243243
metadata={"help": "Temperature if sampling."},
244244
)
245245
max_length: int = field(
@@ -1018,10 +1018,10 @@ def main():
10181018
{
10191019
"pad_token_id": model_args.pad_token_id
10201020
if model_args.pad_token_id is not None
1021-
else model.config.pad_token_id,
1021+
else config.pad_token_id,
10221022
"decoder_start_token_id": model_args.decoder_start_token_id
10231023
if model_args.decoder_start_token_id is not None
1024-
else model.config.decoder_start_token_id,
1024+
else config.decoder_start_token_id,
10251025
}
10261026
)
10271027

0 commit comments

Comments (0)