@@ -64,17 +64,17 @@ result = model.evaluate(dataset)
print(f"Perplexity of the evaluation: {result}")

```
-3. __`INT4` Precision__ - You can now use and fine-tune any LLM with `INT4 Precision` using `GenericKbitModel`.
+3. __`INT4` Precision__ - You can now use and fine-tune any LLM with `INT4 Precision` using `GenericLoraKbitModel`.
```python
# Make the necessary imports
from xturing.datasets import InstructionDataset
-from xturing.models import GenericKbitModel
+from xturing.models import GenericLoraKbitModel

# Load the desired dataset
dataset = InstructionDataset('../llama/alpaca_data')

# Load the desired model for INT4 bit fine-tuning
-model = GenericKbitModel('tiiuae/falcon-7b')
+model = GenericLoraKbitModel('tiiuae/falcon-7b')

# Run the fine-tuning
model.finetune(dataset)
@@ -84,13 +84,13 @@ model.finetune(dataset)
```python
# Make the necessary imports
from xturing.datasets import InstructionDataset
-from xturing.models import GenericKbitModel
+from xturing.models import GenericLoraKbitModel

# Load the desired dataset
dataset = InstructionDataset('../llama/alpaca_data')

# Load the desired model for INT4 bit fine-tuning
-model = GenericKbitModel('tiiuae/falcon-7b')
+model = GenericLoraKbitModel('tiiuae/falcon-7b')

# Generate outputs on desired prompts
outputs = model.generate(dataset=dataset, batch_size=10)
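
The hunk above ends at the `generate` call. As a minimal sketch of inspecting the results, continuing directly from that snippet and assuming `generate` returns the generated texts in dataset order, one might follow it with:

```python
# Hypothetical follow-up, not part of the diff above:
# iterate over the generated texts and print each one for inspection.
for i, output in enumerate(outputs):
    print(f"Output {i}: {output}")
```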