
Commit 50c3cc4 (parent: 106e726)

keep only required configs

1 file changed

docs/transformers/LoRA/GPT2.py

Lines changed: 0 additions & 31 deletions
@@ -4,43 +4,12 @@
 
 tokenizer = AutoTokenizer.from_pretrained("gpt2")
 
-# config from GPT
 config = {
-    "_name_or_path": "gpt2",
-    "activation_function": "gelu_new",
-    "architectures": [
-        "GPT2LMHeadModel"
-    ],
-    "attn_pdrop": 0.1,
-    "bos_token_id": 50256,
-    "embd_pdrop": 0.1,
-    "eos_token_id": 0,
-    "initializer_range": 0.02,
     "layer_norm_epsilon": 1e-05,
-    "model_type": "gpt2",
-    "n_ctx": 1024,
     "n_embd": 768,
     "n_head": 12,
-    "n_inner": None,
     "n_layer": 12,
     "n_positions": 1024,
-    "reorder_and_upcast_attn": False,
-    "resid_pdrop": 0.1,
-    "scale_attn_by_inverse_layer_idx": False,
-    "scale_attn_weights": True,
-    "summary_activation": None,
-    "summary_first_dropout": 0.1,
-    "summary_proj_to_labels": True,
-    "summary_type": "cls_index",
-    "summary_use_proj": True,
-    "task_specific_params": {
-        "text-generation": {
-            "do_sample": True,
-            "max_length": 50
-        }
-    },
-    "transformers_version": "4.42.4",
-    "use_cache": True,
     "vocab_size": 50257
 }

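The retained keys are the core architectural hyperparameters of GPT-2 small. As a rough sketch (an assumption, not part of this commit), a dict like the trimmed one could be passed to Hugging Face's GPT2Config to rebuild the model skeleton; how docs/transformers/LoRA/GPT2.py actually consumes the config may differ.

# Sketch only: assumes the trimmed config is fed to Hugging Face's GPT2Config;
# the actual usage inside docs/transformers/LoRA/GPT2.py may differ.
from transformers import GPT2Config, GPT2LMHeadModel

config = {
    "layer_norm_epsilon": 1e-05,
    "n_embd": 768,        # hidden size
    "n_head": 12,         # attention heads per layer
    "n_layer": 12,        # transformer blocks
    "n_positions": 1024,  # maximum sequence length
    "vocab_size": 50257,
}

# Build a randomly initialised GPT-2 with the same architecture as the "gpt2" checkpoint.
model = GPT2LMHeadModel(GPT2Config(**config))
print(sum(p.numel() for p in model.parameters()))  # roughly 124M parameters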