diff --git a/Dockerfile b/Dockerfile index a9c4c0f..0ad820e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM kernai/refinery-parent-images:v2.0.0-torch-cpu +FROM kernai/refinery-parent-images:v2.1.0-torch-cpu WORKDIR /program diff --git a/dev.Dockerfile b/dev.Dockerfile index 040d52b..e2aabe1 100644 --- a/dev.Dockerfile +++ b/dev.Dockerfile @@ -1,4 +1,4 @@ -FROM kernai/refinery-parent-images:v2.0.0-torch-cpu +FROM kernai/refinery-parent-images:v2.1.0-torch-cpu WORKDIR /app diff --git a/gpu-requirements.txt b/gpu-requirements.txt index a0755d5..5047753 100644 --- a/gpu-requirements.txt +++ b/gpu-requirements.txt @@ -84,6 +84,8 @@ fsspec==2025.7.0 # -r requirements/torch-cuda-requirements.txt # huggingface-hub # torch +greenlet==3.2.4 + # via sqlalchemy h11==0.16.0 # via # -r requirements/torch-cuda-requirements.txt @@ -130,9 +132,9 @@ langcodes==3.5.0 # via spacy language-data==1.3.0 # via langcodes -marisa-trie==1.2.1 +marisa-trie==1.3.0 # via language-data -markdown-it-py==3.0.0 +markdown-it-py==4.0.0 # via rich markupsafe==3.0.2 # via @@ -167,6 +169,43 @@ numpy==1.23.4 # thinc # torchvision # transformers +nvidia-cublas-cu12==12.8.4.1 + # via + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.8.90 + # via torch +nvidia-cuda-nvrtc-cu12==12.8.93 + # via torch +nvidia-cuda-runtime-cu12==12.8.90 + # via torch +nvidia-cudnn-cu12==9.10.2.21 + # via torch +nvidia-cufft-cu12==11.3.3.83 + # via torch +nvidia-cufile-cu12==1.13.1.3 + # via torch +nvidia-curand-cu12==10.3.9.90 + # via torch +nvidia-cusolver-cu12==11.7.3.90 + # via torch +nvidia-cusparse-cu12==12.5.8.93 + # via + # nvidia-cusolver-cu12 + # torch +nvidia-cusparselt-cu12==0.7.1 + # via torch +nvidia-nccl-cu12==2.27.3 + # via torch +nvidia-nvjitlink-cu12==12.8.93 + # via + # nvidia-cufft-cu12 + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 + # torch +nvidia-nvtx-cu12==12.8.90 + # via torch openai==1.97.1 # via -r requirements/gpu-requirements.in packaging==25.0 
@@ -242,7 +281,7 @@ requests==2.32.4 # spacy # transformers # weasel -rich==14.0.0 +rich==14.1.0 # via typer s3transfer==0.13.1 # via @@ -312,12 +351,12 @@ tokenizers==0.21.2 # via # -r requirements/torch-cuda-requirements.txt # transformers -torch==2.7.1 +torch==2.8.0 # via # -r requirements/torch-cuda-requirements.txt # sentence-transformers # torchvision -torchvision==0.22.1 +torchvision==0.23.0 # via -r requirements/gpu-requirements.in tqdm==4.67.1 # via @@ -331,7 +370,9 @@ transformers==4.53.2 # via # -r requirements/torch-cuda-requirements.txt # sentence-transformers -typer==0.16.0 +triton==3.4.0 + # via torch +typer==0.16.1 # via # spacy # weasel @@ -364,7 +405,7 @@ wasabi==1.1.3 # weasel weasel==0.4.1 # via spacy -wrapt==1.17.2 +wrapt==1.17.3 # via smart-open # The following packages are considered to be unsafe in a requirements file: diff --git a/gpu.Dockerfile b/gpu.Dockerfile index c7a310e..9ba1376 100644 --- a/gpu.Dockerfile +++ b/gpu.Dockerfile @@ -1,4 +1,4 @@ -FROM kernai/refinery-parent-images:v2.0.0-torch-cuda +FROM kernai/refinery-parent-images:v2.1.0-torch-cuda WORKDIR /program diff --git a/requirements.txt b/requirements.txt index 9bc9341..86c0c92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -84,6 +84,8 @@ fsspec==2025.7.0 # -r requirements/torch-cpu-requirements.txt # huggingface-hub # torch +greenlet==3.2.4 + # via sqlalchemy h11==0.16.0 # via # -r requirements/torch-cpu-requirements.txt @@ -130,9 +132,9 @@ langcodes==3.5.0 # via spacy language-data==1.3.0 # via langcodes -marisa-trie==1.2.1 +marisa-trie==1.3.0 # via language-data -markdown-it-py==3.0.0 +markdown-it-py==4.0.0 # via rich markupsafe==3.0.2 # via @@ -312,12 +314,12 @@ tokenizers==0.21.2 # via # -r requirements/torch-cpu-requirements.txt # transformers -torch==2.7.1 +torch==2.8.0+cpu # via # -r requirements/torch-cpu-requirements.txt # sentence-transformers # torchvision -torchvision==0.22.1 +torchvision==0.23.0+cpu # via -r requirements/requirements.in tqdm==4.67.1 # 
via @@ -331,7 +333,7 @@ transformers==4.53.2 # via # -r requirements/torch-cpu-requirements.txt # sentence-transformers -typer==0.16.0 +typer==0.16.1 # via # spacy # weasel @@ -364,7 +366,7 @@ wasabi==1.1.3 # weasel weasel==0.4.1 # via spacy -wrapt==1.17.2 +wrapt==1.17.3 # via smart-open # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/gpu-requirements.in b/requirements/gpu-requirements.in index 50ca41a..b1899e7 100644 --- a/requirements/gpu-requirements.in +++ b/requirements/gpu-requirements.in @@ -1,5 +1,5 @@ -r torch-cuda-requirements.txt spacy==3.7.5 -torchvision==0.22.1 # define version for torchvision to avoid dependency conflict +torchvision==0.23.0 # define version for torchvision to avoid dependency conflict sentence-transformers==5.0.0 # last version with default_prompt_name & pooling_mode_weightedmean_tokens # higher only possible with embedders/gates change openai==1.97.1 \ No newline at end of file diff --git a/requirements/requirements.in b/requirements/requirements.in index 6f337ed..9358991 100644 --- a/requirements/requirements.in +++ b/requirements/requirements.in @@ -1,5 +1,5 @@ -r torch-cpu-requirements.txt spacy==3.7.5 -torchvision==0.22.1 # define version for torchvision to avoid dependency conflict +torchvision==0.23.0 # define version for torchvision to avoid dependency conflict sentence-transformers==5.0.0 # last version with default_prompt_name & pooling_mode_weightedmean_tokens # higher only possible with embedders/gates change openai==1.97.1 \ No newline at end of file diff --git a/requirements/torch-cpu-requirements.txt b/requirements/torch-cpu-requirements.txt index 6c7b67e..761d732 100644 --- a/requirements/torch-cpu-requirements.txt +++ b/requirements/torch-cpu-requirements.txt @@ -145,7 +145,7 @@ threadpoolctl==3.6.0 # via scikit-learn tokenizers==0.21.2 # via transformers -torch==2.7.1 +torch==2.8.0 # via -r torch-cpu-requirements.in tqdm==4.67.1 # via diff --git a/requirements/torch-cuda-requirements.txt 
b/requirements/torch-cuda-requirements.txt index 09b95bb..ba40bcc 100644 --- a/requirements/torch-cuda-requirements.txt +++ b/requirements/torch-cuda-requirements.txt @@ -145,7 +145,7 @@ threadpoolctl==3.6.0 # via scikit-learn tokenizers==0.21.2 # via transformers -torch==2.7.1 +torch==2.8.0 # via -r torch-cuda-requirements.in tqdm==4.67.1 # via