3 files changed: +5 −5 lines changed

@@ -10,7 +10,7 @@
 tokenizers>=0.15.0
 huggingface_hub<0.18,>=0.16.4
 einops  # Required for phi-1_5
-transformers >= 4.34.0  # Required for Mistral.
+transformers >= 4.36.0  # Required for Mixtral.
 fastapi
 uvicorn[standard]
 pydantic == 1.10.13  # Required for OpenAI server.
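The bump from 4.34 to 4.36 matters because MixtralConfig first shipped in
transformers 4.36.0, so the import in the model file below fails on older
releases. A hypothetical sanity check (not part of this PR):

    # Hypothetical check, not from this PR: confirm the installed
    # transformers release is new enough to provide MixtralConfig.
    import transformers
    from packaging import version

    assert version.parse(transformers.__version__) >= version.parse("4.36.0"), \
        "Mixtral support requires transformers >= 4.36.0"
    from transformers import MixtralConfig  # raises ImportError before 4.36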
@@ -7,7 +7,7 @@ sentencepiece  # Required for LLaMA tokenizer.
 numpy
 einops  # Required for phi-1_5
 torch >= 2.1.1
-transformers >= 4.34.0  # Required for Mistral.
+transformers >= 4.36.0  # Required for Mixtral.
 xformers >= 0.0.23  # Required for CUDA 12.1.
 fastapi
 uvicorn[standard]
vllm/model_executor/models

@@ -29,7 +29,7 @@
 import torch.nn.functional as F

 from torch import nn
-from transformers import MistralConfig
+from transformers import MixtralConfig

 try:
     import megablocks.ops as ops
@@ -395,7 +395,7 @@ class MixtralDecoderLayer(nn.Module):

     def __init__(
         self,
-        config: MistralConfig,
+        config: MixtralConfig,
     ) -> None:
         super().__init__()
         self.hidden_size = config.hidden_size
@@ -443,7 +443,7 @@ class MixtralForCausalLM(nn.Module):

     def __init__(
         self,
-        config: MistralConfig,
+        config: MixtralConfig,
         linear_method: Optional[LinearMethodBase] = None,
     ) -> None:
         super().__init__()
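For context, a hypothetical usage sketch (not part of this PR) showing that
the config class the model file now imports is what Mixtral checkpoints on
the Hugging Face Hub resolve to:

    # Hypothetical sketch, not from this PR. AutoConfig returns a
    # MixtralConfig for Mixtral checkpoints, which carries the MoE
    # hyperparameters the model code reads (e.g. hidden_size above).
    from transformers import AutoConfig, MixtralConfig

    config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
    assert isinstance(config, MixtralConfig)
    print(config.num_local_experts, config.num_experts_per_tok)  # 8, 2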