# Copyright (c) 2025 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#      http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
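"""
Codebook weight compression with NNCF on an OpenVINO causal language model.

Exports HuggingFaceTB/SmolLM2-360M-Instruct through optimum-intel, then
compresses its weights twice: first with the built-in CB4_F8E4M3 codebook mode
and then with a user-supplied codebook, comparing the answers the model
generates before and after each compression.
"""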

import warnings

import numpy as np
from optimum.intel.openvino import OVModelForCausalLM
from torch.jit import TracerWarning
from transformers import AutoTokenizer
from transformers import logging

import nncf

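# Keep the console output readable: hide transformers progress/info logs and the
# torch.jit tracer warnings raised while the model is traced for OpenVINO export.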
logging.set_verbosity_error()
warnings.filterwarnings("ignore", category=TracerWarning)


MODEL_ID = "HuggingFaceTB/SmolLM2-360M-Instruct"
COMPRESSED_MODEL_ID = "smollm2_360m_compressed_codebook"


def generate_answers(
    questions: list[str], model: OVModelForCausalLM, tokenizer: AutoTokenizer, max_new_tokens: int = 50
) -> dict[str, str]:
    """
    Generate answers for a list of questions using the provided model and tokenizer.

    :param questions: List of questions to be answered.
    :param model: The model to use for generating answers.
    :param tokenizer: The tokenizer to use for processing the input and output.
    :param max_new_tokens: Maximum number of new tokens to generate for each answer. Defaults to 50.
    :return: A dictionary mapping each question to its corresponding answer.
    """
    messages = [
        {"role": "system", "content": "You are a chatbot who always responds as short as possible."},
        {"role": "user", "content": "What is the capital of Spain?"},
        {"role": "assistant", "content": "Madrid."},
    ]
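    # The seed messages above give the model a one-shot example of the expected
    # terse style; every new question and answer is appended to the same history,
    # so each follow-up is generated with the earlier turns as context.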
    answers_by_questions = {}

    for question in questions:
        messages.append({"role": "user", "content": question})
        input_ids = tokenizer.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
        ).to(device=model.device)
        input_len = len(input_ids[0])

        output = model.generate(input_ids, max_new_tokens=max_new_tokens, do_sample=False)[0]
        answer = tokenizer.decode(output[input_len:], skip_special_tokens=True)
        answers_by_questions[question] = answer
        messages.append({"role": "assistant", "content": answer})

    return answers_by_questions


def print_answers(header: str, answers_by_questions: dict[str, str]) -> None:
    """
    Print the answers to the console.

    :param header: Header to print before the answers.
    :param answers_by_questions: Dictionary mapping questions to their answers.
    """
    print(header)
    for question, answer in answers_by_questions.items():
        print(f"Q: {question}\nA: {answer}\n")


QUESTIONS = [
    "What is the capital of France?",
    "What is the highest peak in the Alps?",
    "What is the largest city in Canada?",
    "What is the most visited city in Japan?",
]


def load_model_and_tokenizer(model_id: str, export: bool = True) -> tuple[OVModelForCausalLM, AutoTokenizer]:
    """
    Load the model and tokenizer from the specified model ID.

    :param model_id: The identifier of the model to load.
    :param export: Whether to export the model for OpenVINO. Defaults to True.
    :return: A tuple containing the loaded model and tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
    model = OVModelForCausalLM.from_pretrained(
        model_id,
        export=export,
        # Explicitly disable Optimum's optional 8-bit weight compression on export,
        # so the NNCF codebook compression below starts from full-precision weights.
        load_in_8bit=False,
    )
    return model, tokenizer


def default_codebook_example(model_id: str, compressed_model_id: str) -> list[str]:
    """
    Compress the model with NNCF's predefined CB4_F8E4M3 codebook and compare
    its answers before and after compression.

    :param model_id: The identifier of the model to load.
    :param compressed_model_id: The identifier for the compressed model to save.
    :return: A list of answers generated by the model after compression.
    """
    model, tokenizer = load_model_and_tokenizer(model_id)
    answers_by_questions = generate_answers(QUESTIONS, model, tokenizer)
    print_answers("Non-optimized model outputs:\n", answers_by_questions)

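    # CB4_F8E4M3 is NNCF's predefined codebook mode: weights become 4-bit indices
    # into a fixed 16-entry table of FP8 (E4M3) values. ratio=1.0 applies this to
    # all eligible layers, and group_size=64 shares one scale per group of 64 weights.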
    model.model = nncf.compress_weights(
        model.model,
        mode=nncf.CompressWeightsMode.CB4_F8E4M3,
        ratio=1.0,
        group_size=64,
    )
    model.save_pretrained(compressed_model_id)
    tokenizer.save_pretrained(compressed_model_id)

    model, tokenizer = load_model_and_tokenizer(compressed_model_id, export=False)
    answers_by_questions = generate_answers(QUESTIONS, model, tokenizer)
    print_answers("Optimized model outputs:\n", answers_by_questions)

    return list(answers_by_questions.values())


def custom_codebook_example(model_id: str, compressed_model_id: str) -> list[str]:
    """
    Compress the model with a user-defined codebook and compare its answers
    before and after compression.

    :param model_id: The identifier of the model to load.
    :param compressed_model_id: The identifier for the compressed model to save.
    :return: A list of answers generated by the model after compression.
    """
    model, tokenizer = load_model_and_tokenizer(model_id)

    answers_by_questions = generate_answers(QUESTIONS, model, tokenizer)
    print_answers("Non-optimized model outputs:\n", answers_by_questions)

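    # A custom codebook is an array of the values that compressed weights may take;
    # here, nine signed power-of-two levels. Each (scaled) weight is mapped to the
    # nearest codebook entry, and only the entry indices are stored.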
    codebook = np.array([-8, -4, -2, -1, 0, 1, 2, 4, 8], dtype=np.int8)

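    # CompressWeightsMode.CODEBOOK performs generic codebook quantization with the
    # table passed via AdvancedCompressionParameters; group_size=-1 disables
    # grouping, so scales are computed per output channel.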
    model.model = nncf.compress_weights(
        model.model,
        mode=nncf.CompressWeightsMode.CODEBOOK,
        ratio=1.0,
        group_size=-1,
        advanced_parameters=nncf.AdvancedCompressionParameters(codebook=codebook),
    )
    model.save_pretrained(compressed_model_id)
    tokenizer.save_pretrained(compressed_model_id)

    model, tokenizer = load_model_and_tokenizer(compressed_model_id, export=False)
    answers_by_questions = generate_answers(QUESTIONS, model, tokenizer)
    print_answers("Optimized model outputs:\n", answers_by_questions)

    return list(answers_by_questions.values())


def main():
    res = default_codebook_example(MODEL_ID, COMPRESSED_MODEL_ID)
    res += custom_codebook_example(MODEL_ID, COMPRESSED_MODEL_ID + "_custom")
    return res


if __name__ == "__main__":
    main()