diff --git a/Dockerfile b/Dockerfile
index 2c3c8bf8..f980f62d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -165,7 +165,7 @@ RUN cd server && \
     pip install ".[accelerate]" --no-cache-dir
 
 # temp: install newer transformers lib that optimum clashes with
-RUN pip install transformers==4.40.0 tokenizers==0.19.1 --no-cache-dir
+RUN pip install transformers==4.48.0 tokenizers==0.21.1 --no-cache-dir
 
 # Patch codegen model changes into transformers
 RUN cp server/transformers_patch/modeling_codegen.py ${SITE_PACKAGES}/transformers/models/codegen/modeling_codegen.py
@@ -292,7 +292,7 @@ COPY server server
 RUN cd server && make gen-server && pip install ".[accelerate, ibm-fms, onnx-gpu, quantize]" --no-cache-dir --extra-index-url=https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
 
 # temp: install newer transformers lib that optimum clashes with
-RUN pip install transformers==4.40.0 tokenizers==0.19.1 --no-cache-dir
+RUN pip install transformers==4.48.0 tokenizers==0.21.1 --no-cache-dir
 
 # Patch codegen model changes into transformers 4.35
 RUN cp server/transformers_patch/modeling_codegen.py ${SITE_PACKAGES}/transformers/models/codegen/modeling_codegen.py