
Commit 00c544f

Improve Hugging Face API utilization in tests (#473)

* Remove isolation of downloaded model
* Use k8s-util runners with model cache and API key
* Minor cleanup in 'test' action

1 parent: e85d14a
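The commit boils down to pointing the CI jobs at a shared model cache and an authenticated, read-only Hugging Face token. A minimal sketch (illustrative only, not code from this commit) of how the two environment variables added below are consumed when a test downloads a model:

# Illustrative sketch only -- not part of this commit.
import os

# HF_HOME points the Hugging Face cache at the shared /model-cache volume on the
# runner, so repeated test runs reuse downloaded weights instead of re-fetching them.
os.environ.setdefault("HF_HOME", "/model-cache")

# HF_TOKEN is read automatically by huggingface_hub and authenticates downloads,
# avoiding the stricter anonymous rate limits on the Hugging Face API.
# (In CI it comes from the NM_HF_TOKEN_READ_ONLY secret.)

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "RedHatAI/llama2.c-stories110M-pruned50",  # model used by the updated test fixture
    torch_dtype="auto",
)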

File tree

5 files changed (+27, -25 lines):

  .github/actions/test/action.yml
  .github/workflows/test-check.yaml
  .github/workflows/test.yml
  .github/workflows/trigger-all.yml
  tests/test_utils/test_helpers.py

.github/actions/test/action.yml

Lines changed: 3 additions & 4 deletions
@@ -9,8 +9,7 @@ inputs:
     required: true
   code_coverage:
     description: whether to collect code coverage metrics during test run
-    type: boolean
-    default: false
+    default: 'false'
 outputs:
   status:
     description: "final status from test"
@@ -51,7 +50,7 @@ runs:

       if [[ "${ENABLE_COVERAGE}" == "true" ]]; then
         echo "::group::Installing code coverage requirements via pip"
-        pip install bashlex https://github.com/neuralmagic/pytest-nm-releng/archive/v0.4.0.tar.gz
+        pip install https://github.com/neuralmagic/pytest-nm-releng/archive/v0.4.0.tar.gz
         pip install coverage pytest-cov

         # Adding Code coverage to the tests
@@ -76,7 +75,7 @@ runs:
         fi
         echo "::endgroup::"
       fi
-
+
       deactivate
       exit ${SUCCESS}
     shell: bash
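The dropped `type: boolean` key reflects that action inputs are always handed to steps as strings; the coverage switch above is a textual comparison against "true", which is why the new default is the quoted string 'false'. A tiny illustrative sketch (not repository code) of that comparison:

# Illustration only, not repository code: action inputs arrive as strings, so the
# 'test' action's coverage switch is a text comparison, not a real boolean.
enable_coverage = "false"          # mirrors the new default: 'false'
if enable_coverage == "true":      # same check as the bash step above
    print("installing coverage tooling")
else:
    print("skipping coverage tooling")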

.github/workflows/test-check.yaml

Lines changed: 9 additions & 4 deletions
@@ -12,7 +12,10 @@ on:

 jobs:
   python-tests:
-    runs-on: ubuntu-24.04
+    runs-on: k8s-util
+    env:
+      HF_HOME: /model-cache
+      HF_TOKEN: ${{ secrets.NM_HF_TOKEN_READ_ONLY }}
     steps:
       - uses: actions/setup-python@v5
         with:
@@ -21,11 +24,13 @@ jobs:
         with:
           fetch-depth: 0
           fetch-tags: true
+      - name: Install system dependencies
+        run: |-
+          sudo apt-get update
+          sudo apt-get install -y --no-install-recommends g++ gcc make
       - name: Set Env
-        run: |
-          pip3 install --upgrade pip && pip3 install --upgrade setuptools
+        run: pip3 install --upgrade pip setuptools
       - name: "⚙️ Install dependencies"
         run: pip3 install .[dev,accelerate]
       - name: "🔬 Running tests"
         run: make test
-

.github/workflows/test.yml

Lines changed: 6 additions & 7 deletions
@@ -71,6 +71,9 @@ jobs:
       contents: 'read'
       id-token: 'write'
       pages: 'write'
+    env:
+      HF_HOME: /model-cache
+      HF_TOKEN: ${{ secrets.NM_HF_TOKEN_READ_ONLY }}
     environment:
       name: github-pages
       url: ${{ steps.coverage.outputs.page_url }}
@@ -90,13 +93,9 @@ jobs:
           python-version: ${{ inputs.python }}

       - name: install system dependencies
-        run: |
-          if command -v g++ >/dev/null 2>&1; then
-            echo "found g++ compiler"
-          else
-            echo "installing g++ etc compilers..."
-            sudo apt update && sudo apt install -y g++ gcc
-          fi
+        run: |-
+          sudo apt-get update
+          sudo apt-get install -y --no-install-recommends g++ gcc make
         shell: bash

       - name: checkout code

.github/workflows/trigger-all.yml

Lines changed: 3 additions & 3 deletions
@@ -19,7 +19,7 @@ on:
       type: boolean
       default: false
     gitref:
-      description: "git commit hash or tag name"
+      description: "git commit hash or tag name"
       type: string
       default: 'main'

@@ -32,8 +32,8 @@ jobs:
      wf_category: ${{ inputs.wf_category || 'NIGHTLY' }}
      gitref: ${{ inputs.gitref || 'main' }}
      push_to_pypi: ${{ (github.event.schedule == '30 0 * * *') || inputs.push_to_pypi || false }}
-     test_configs: '[{"python":"3.11.4","label":"ubuntu-24.04","timeout":"40","code_coverage":true},
-                    {"python":"3.10.12","label":"ubuntu-22.04","timeout":"40"},
+     test_configs: '[{"python":"3.11.4","label":"k8s-util","timeout":"40","code_coverage":true},
+                    {"python":"3.10.12","label":"k8s-util","timeout":"40"},
                     {"python":"3.9.17","label":"k8s-h100-solo","timeout":"40"},
                     {"python":"3.12.6","label":"k8s-a100-duo","timeout":"40"}]'
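test_configs is a single JSON string that the downstream workflow expands into its test matrix. A small sketch (assumed helper, not part of the repository) that parses the same string shows which runner label each Python version now targets:

# Assumed illustration, not repository code: parse the test_configs JSON string
# to show the Python-version -> runner-label mapping after this change.
import json

test_configs = json.loads(
    '[{"python":"3.11.4","label":"k8s-util","timeout":"40","code_coverage":true},'
    '{"python":"3.10.12","label":"k8s-util","timeout":"40"},'
    '{"python":"3.9.17","label":"k8s-h100-solo","timeout":"40"},'
    '{"python":"3.12.6","label":"k8s-a100-duo","timeout":"40"}]'
)
for cfg in test_configs:
    print(f"python {cfg['python']} -> {cfg['label']} (timeout {cfg['timeout']})")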

tests/test_utils/test_helpers.py

Lines changed: 6 additions & 7 deletions
@@ -36,12 +36,11 @@ def tensors():


 @pytest.fixture
-def llama_model(tmp_path):
-    model_name = "neuralmagic/llama2.c-stories110M-pruned50"
-    model = AutoModelForCausalLM.from_pretrained(
-        model_name, torch_dtype="auto", cache_dir=tmp_path
+def llama_model():
+    return AutoModelForCausalLM.from_pretrained(
+        "RedHatAI/llama2.c-stories110M-pruned50",
+        torch_dtype="auto",
     )
-    yield model


 def test_save_compressed_sparse_bitmask(tmp_path, tensors):
@@ -120,9 +119,9 @@ def test_load_compressed_dense(tmp_path, tensors):


 def test_load_compressed_sharded(tmp_path, llama_model):
-    sharded_model_path = tmp_path / "shared_model"
+    sharded_model_path = tmp_path / "sharded_model"
     llama_model.save_pretrained(sharded_model_path, max_shard_size="2MB")
-    # make sure that model is shared on disk
+    # make sure that model is sharded on disk
     assert len(os.listdir(sharded_model_path)) > 1
     loaded_state_dict = dict(load_compressed(sharded_model_path))
     for key, value in llama_model.state_dict().items():
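Removing cache_dir=tmp_path means the fixture now goes through the default Hugging Face cache, which the workflows above point at /model-cache, so the weights are downloaded once per runner instead of once per test session. For reference, the updated fixture plus a hypothetical sanity check (the check is not in the repository):

# The updated fixture (verbatim from the diff above); it relies on the default
# Hugging Face cache (HF_HOME=/model-cache in CI) instead of a per-test tmp_path.
import pytest
from transformers import AutoModelForCausalLM

@pytest.fixture
def llama_model():
    return AutoModelForCausalLM.from_pretrained(
        "RedHatAI/llama2.c-stories110M-pruned50",
        torch_dtype="auto",
    )

# Hypothetical sanity check, not in the repository: a second load of the same model id
# is served from the local cache rather than hitting the Hugging Face API again.
def test_fixture_is_cached(llama_model):
    again = AutoModelForCausalLM.from_pretrained(
        "RedHatAI/llama2.c-stories110M-pruned50", torch_dtype="auto"
    )
    assert llama_model.config.model_type == again.config.model_type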
