@@ -86,8 +86,8 @@ RUN wget -O /tmp/Miniforge.sh https://github.com/conda-forge/miniforge/releases/
     && source /Miniforge/etc/profile.d/conda.sh \
     && source /Miniforge/etc/profile.d/mamba.sh \
     && mamba update -y -q -n base -c defaults mamba \
-    && mamba create -y -q -n Code-Eval python=3.11 setuptools=69.5.1 \
-    && mamba activate Code-Eval \
+    && mamba create -y -q -n BigCodeBench python=3.11 setuptools=69.5.1 \
+    && mamba activate BigCodeBench \
     && mamba install -y -q -c conda-forge \
         charset-normalizer \
         gputil \
@@ -106,33 +106,35 @@ RUN wget -O /tmp/Miniforge.sh https://github.com/conda-forge/miniforge/releases/
 # Install VLLM precompiled for the appropriate CUDA version and ensure PyTorch is installed from the same channel
 RUN source /Miniforge/etc/profile.d/conda.sh \
     && source /Miniforge/etc/profile.d/mamba.sh \
-    && mamba activate Code-Eval \
+    && mamba activate BigCodeBench \
     && pip install https://github.com/vllm-project/vllm/releases/download/v0.4.0/vllm-0.4.0+cu118-cp311-cp311-manylinux1_x86_64.whl \
        --extra-index-url https://download.pytorch.org/whl/cu118

 # Install Flash Attention
 RUN source /Miniforge/etc/profile.d/conda.sh \
     && source /Miniforge/etc/profile.d/mamba.sh \
-    && mamba activate Code-Eval \
+    && mamba activate BigCodeBench \
     && export MAX_JOBS=$(($(nproc) - 2)) \
     && pip install --no-cache-dir ninja packaging psutil \
     && pip install flash-attn==2.5.8 --no-build-isolation

+RUN rm -rf /bigcodebench
+
 # Clone the benchmark code to a local directory
-RUN git clone https://github.com/bigcode-project/code-eval.git /bigcodebench
+RUN git clone https://github.com/bigcode-project/bigcodebench.git /bigcodebench

-# Install Code-Eval and pre-load the dataset
+# Install BigCodeBench and pre-load the dataset
 RUN source /Miniforge/etc/profile.d/conda.sh \
     && source /Miniforge/etc/profile.d/mamba.sh \
-    && mamba activate Code-Eval \
-    && pip install bigcodebench --upgrade \
+    && mamba activate BigCodeBench \
+    && cd /bigcodebench && pip install .[generate] \
     && python -c "from bigcodebench.data import get_bigcodebench; get_bigcodebench()"

 WORKDIR /bigcodebench

 # Declare an argument for the huggingface token
 ARG HF_TOKEN
-RUN if [[ -n "$HF_TOKEN" ]] ; then /Miniforge/envs/Code-Eval/bin/huggingface-cli login --token $HF_TOKEN ; \
+RUN if [[ -n "$HF_TOKEN" ]] ; then /Miniforge/envs/BigCodeBench/bin/huggingface-cli login --token $HF_TOKEN ; \
     else echo "No HuggingFace token specified. Access to gated or private models will be unavailable." ; fi

-ENTRYPOINT ["/Miniforge/envs/Code-Eval/bin/python", "-m", "bigcodebench.generate"]
+ENTRYPOINT ["/Miniforge/envs/BigCodeBench/bin/python", "-m", "bigcodebench.generate"]
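
Usage sketch: since the ENTRYPOINT runs bigcodebench.generate directly, arguments to docker run are passed straight through to it. The image tag, model id, and generate flags below are illustrative assumptions, not taken from this commit; check them against the repository's README.

    # Build the image; HF_TOKEN is optional and only needed for gated or private models
    docker build --build-arg HF_TOKEN=<your_token> -t bigcodebench-eval .

    # Run generation; --model and --backend are assumed flag names for bigcodebench.generate
    docker run --gpus all bigcodebench-eval --model <model_id> --backend vllm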