-
Notifications
You must be signed in to change notification settings - Fork 25
Add support for gemma3-text #70
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from 6 commits
9744167
9824aec
9a2a513
4f43910
9de0ce9
1d70ef1
ab9669c
82c5a7d
14cec9f
7f1249f
801afdb
7c86839
94b7645
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,25 @@ | ||
"""Simple example: Export Gemma3 270M to ONNX and generate text.

Usage:
    uv pip install onnxruntime
    uv run examples/gemma3.py
"""

from transformers import AutoTokenizer

from optimum.onnxruntime import ORTModelForCausalLM


def main() -> None:
    """Export the Gemma3 checkpoint to ONNX and run one chat-style generation."""
    model_id = "google/gemma-3-270m-it"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # export=True converts the PyTorch checkpoint to ONNX on the fly.
    model = ORTModelForCausalLM.from_pretrained(model_id, export=True)

    # Chat with instruction-tuned model: render the conversation through the
    # model's chat template so the prompt carries the expected special tokens.
    conversation = [{"role": "user", "content": "Hello! How are you?"}]
    prompt = tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(prompt, return_tensors="pt")

    # pad_token_id is set explicitly to avoid the "no pad token" warning
    # during open-ended generation.
    outputs = model.generate(
        **inputs, max_new_tokens=50, pad_token_id=tokenizer.eos_token_id
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    print(response)


# Guard the entry point so importing this module has no side effects
# (the original ran the full download/export/inference at import time).
if __name__ == "__main__":
    main()
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -517,6 +517,13 @@ class Gemma2OnnxConfig(TextDecoderOnnxConfig): | |
| MIN_TRANSFORMERS_VERSION = version.parse("4.53.0") | ||
|
|
||
|
|
||
@register_tasks_manager_onnx("gemma3", *COMMON_TEXT_GENERATION_TASKS)
@register_tasks_manager_onnx("gemma3_text", *COMMON_TEXT_GENERATION_TASKS)
class Gemma3OnnxConfig(GemmaOnnxConfig):
    """ONNX config for Gemma3 text-only models.

    Registered for both the "gemma3" and "gemma3_text" model types so that
    text-only checkpoints exported under either architecture name resolve to
    this config. All input/output specifications are inherited unchanged from
    GemmaOnnxConfig.
    """

    # Minimum transformers release for this export path — presumably the first
    # version where the gemma3 text model type is usable here; verify against
    # the transformers changelog. NOTE(review): 4.52.0 predates the sibling
    # Gemma2 config's 4.53.0 pin — confirm that is intentional.
    MIN_TRANSFORMERS_VERSION = version.parse("4.52.0")
|
||
|
|
||
|
|
||
@register_tasks_manager_onnx("gpt_oss", *COMMON_TEXT_GENERATION_TASKS)
class GPTOssOnnxConfig(GemmaOnnxConfig):
    """ONNX config for the "gpt_oss" model type.

    Reuses GemmaOnnxConfig's input/output specifications unchanged; only the
    registration name and the minimum supported transformers version differ.
    """

    # Earliest transformers release this export is pinned to — presumably the
    # version introducing the gpt_oss model type; verify against the changelog.
    MIN_TRANSFORMERS_VERSION = version.parse("4.55.0")
|
|
||
Uh oh!
There was an error while loading. Please reload this page.