Error when calling the model after fine-tuning
#926
I'm in the same situation as you, but the error I get is the one below:
```
/mnt/workspace/ChatGLM3/finetune_demo> python inference_hf.py output/checkpoint-3000/ --prompt "你是谁?"

Loading checkpoint shards: 100%|██████████████████████████████████████████████████████████| 7/7 [00:04<00:00, 1.69it/s]
Setting eos_token is not supported, use the default one.
Setting pad_token is not supported, use the default one.
Setting unk_token is not supported, use the default one.
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /mnt/workspace/ChatGLM3/finetune_demo/inference_hf.py:50 in main │
│ │
│ 47 │ │ model_dir: Annotated[str, typer.Argument(help='')], │
│ 48 │ │ prompt: Annotated[str, typer.Option(help='')], │
│ 49 ): │
│ ❱ 50 │ model, tokenizer = load_model_and_tokenizer(model_dir) │
│ 51 │ response, _ = model.chat(tokenizer, prompt) │
│ 52 │ print(response) │
│ 53 │
│ │
│ /mnt/workspace/ChatGLM3/finetune_demo/inference_hf.py:30 in load_model_and_tokenizer │
│ │
│ 27 def load_model_and_tokenizer(model_dir: Union[str, Path]) -> tuple[ModelType, TokenizerT │
│ 28 │ model_dir = _resolve_path(model_dir) │
│ 29 │ if (model_dir / 'adapter_config.json').exists(): │
│ ❱ 30 │ │ model = AutoPeftModelForCausalLM.from_pretrained( │
│ 31 │ │ │ model_dir, trust_remote_code=True, device_map='auto' │
│ 32 │ │ ) │
│ 33 │ │ tokenizer_dir = model.peft_config['default'].base_model_name_or_path │
│ │
│ /home/pai/lib/python3.11/site-packages/peft/auto.py:126 in from_pretrained │
│ │
│ 123 │ │ │ tokenizer = AutoTokenizer.from_pretrained( │
│ 124 │ │ │ │ pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remot │
│ 125 │ │ │ ) │
│ ❱ 126 │ │ │ base_model.resize_token_embeddings(len(tokenizer)) │
│ 127 │ │ │
│ 128 │ │ return cls._target_peft_class.from_pretrained( │
│ 129 │ │ │ base_model, │
│ │
│ /home/pai/lib/python3.11/site-packages/transformers/modeling_utils.py:1786 in │
│ resize_token_embeddings │
│ │
│ 1783 │ │ Return: │
│ 1784 │ │ │ `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the m │
│ 1785 │ │ """ │
│ ❱ 1786 │ │ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) │
│ 1787 │ │ if new_num_tokens is None and pad_to_multiple_of is None: │
│ 1788 │ │ │ return model_embeds │
│ 1789 │
│ │
│ /home/pai/lib/python3.11/site-packages/transformers/modeling_utils.py:1807 in │
│ _resize_token_embeddings │
│ │
│ 1804 │ │ │ add_hook_to_module(new_embeddings, hook) │
│ 1805 │ │ old_embeddings_requires_grad = old_embeddings.weight.requires_grad │
│ 1806 │ │ new_embeddings.requires_grad_(old_embeddings_requires_grad) │
│ ❱ 1807 │ │ self.set_input_embeddings(new_embeddings) │
│ 1808 │ │ │
│ 1809 │ │ # Update new_num_tokens with the actual size of new_embeddings │
│ 1810 │ │ if pad_to_multiple_of is not None: │
│ │
│ /home/pai/lib/python3.11/site-packages/transformers/modeling_utils.py:1585 in │
│ set_input_embeddings │
│ │
│ 1582 │ │ """ │
│ 1583 │ │ base_model = getattr(self, self.base_model_prefix, self) │
│ 1584 │ │ if base_model is not self: │
│ ❱ 1585 │ │ │ base_model.set_input_embeddings(value) │
│ 1586 │ │ else: │
│ 1587 │ │ │ raise NotImplementedError │
│ 1588 │
│ │
│ /home/pai/lib/python3.11/site-packages/transformers/modeling_utils.py:1587 in │
│ set_input_embeddings │
│ │
│ 1584 │ │ if base_model is not self: │
│ 1585 │ │ │ base_model.set_input_embeddings(value) │
│ 1586 │ │ else: │
│ ❱ 1587 │ │ │ raise NotImplementedError │
│ 1588 │ │
│ 1589 │ def get_output_embeddings(self) -> nn.Module: │
│ 1590 │ │ """ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
NotImplementedError
```
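The frames above show the failure path: because the checkpoint directory contains tokenizer files, `AutoPeftModelForCausalLM.from_pretrained` (in `peft/auto.py`) loads that tokenizer and calls `base_model.resize_token_embeddings(len(tokenizer))`, and ChatGLM3's remote modeling code does not override `set_input_embeddings`, so transformers' base-class fallback raises `NotImplementedError`. One possible workaround is to skip peft's auto class and attach the adapter yourself with `PeftModel`, which never touches `resize_token_embeddings`. A minimal, untested sketch, assuming the adapter lives in `output/checkpoint-3000` (from the command above) and that its `adapter_config.json` records a reachable base model path:

```python
# Untested sketch: load the base model and the LoRA adapter manually instead
# of going through AutoPeftModelForCausalLM, so peft never calls
# resize_token_embeddings() on a model that lacks set_input_embeddings().
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

adapter_dir = 'output/checkpoint-3000'  # the fine-tuned checkpoint from the command above

# adapter_config.json records which base model the adapter was trained on.
peft_config = PeftConfig.from_pretrained(adapter_dir)
base_dir = peft_config.base_model_name_or_path

# trust_remote_code=True is required for ChatGLM3's custom modeling code.
model = AutoModelForCausalLM.from_pretrained(base_dir, trust_remote_code=True, device_map='auto')
tokenizer = AutoTokenizer.from_pretrained(base_dir, trust_remote_code=True)

# Inject the LoRA weights in place; no embedding resize happens on this path.
model = PeftModel.from_pretrained(model, adapter_dir)
model.eval()

response, _ = model.chat(tokenizer, "你是谁?")
print(response)
```

Since peft only triggers the resize when it finds a tokenizer config next to the adapter (see the `peft/auto.py` frame above), removing the tokenizer files from the checkpoint directory should also let the original `inference_hf.py` run unchanged, though I have not verified that across peft versions.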