Skip to content

Commit 14d55a7

Browse files
authored
Merge pull request #2292 from huggingface/inference_tests
Do full inference test against test vectors for test_* models
2 parents 44f1a34 + 1a2d8bb commit 14d55a7

File tree

2 files changed

+53
-8
lines changed

.github/workflows/tests.yml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,11 @@ jobs:
1616
strategy:
1717
matrix:
1818
os: [ubuntu-latest]
19-
python: ['3.10', '3.11']
20-
torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.1.0', vision: '0.16.0'}]
19+
python: ['3.10', '3.12']
20+
torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.4.1', vision: '0.19.1'}]
2121
testmarker: ['-k "not test_models"', '-m base', '-m cfg', '-m torchscript', '-m features', '-m fxforward', '-m fxbackward']
2222
exclude:
23-
- python: '3.11'
23+
- python: '3.12'
2424
torch: {base: '1.13.0', vision: '0.14.0'}
2525
runs-on: ${{ matrix.os }}
2626

@@ -46,7 +46,7 @@ jobs:
4646
sudo sed -i 's/azure\.//' /etc/apt/sources.list
4747
sudo apt update
4848
sudo apt install -y google-perftools
49-
pip install --no-cache-dir torch==${{ matrix.torch.base }}+cpu torchvision==${{ matrix.torch.vision }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
49+
pip install --no-cache-dir torch==${{ matrix.torch.base }}+cpu torchvision==${{ matrix.torch.vision }}+cpu --index-url https://download.pytorch.org/whl/cpu
5050
- name: Install requirements
5151
run: |
5252
pip install -r requirements.txt

tests/test_models.py

Lines changed: 49 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
has_fx_feature_extraction = False
2727

2828
import timm
29-
from timm import list_models, create_model, set_scriptable, get_pretrained_cfg_value
29+
from timm import list_models, list_pretrained, create_model, set_scriptable, get_pretrained_cfg_value
3030
from timm.layers import Format, get_spatial_dim, get_channel_dim
3131
from timm.models import get_notrace_modules, get_notrace_functions
3232

@@ -39,7 +39,8 @@
3939
torch_device = os.environ.get('TORCH_DEVICE', 'cpu')
4040
timeout = os.environ.get('TIMEOUT')
4141
timeout120 = int(timeout) if timeout else 120
42-
timeout300 = int(timeout) if timeout else 300
42+
timeout240 = int(timeout) if timeout else 240
43+
timeout360 = int(timeout) if timeout else 360
4344

4445
if hasattr(torch._C, '_jit_set_profiling_executor'):
4546
# legacy executor is too slow to compile large models for unit tests
@@ -118,6 +119,50 @@ def _get_input_size(model=None, model_name='', target=None):
118119
return input_size
119120

120121

122+
@pytest.mark.base
@pytest.mark.timeout(timeout240)
@pytest.mark.parametrize('model_name', list_pretrained('test_*'))
@pytest.mark.parametrize('batch_size', [1])
def test_model_inference(model_name, batch_size):
    """Run full inference against pre-computed test vectors for a test_* model.

    Downloads the `test/` artifacts from the model's HF Hub repo (random-input
    tensors, owl-image tensors, and the original owl jpg), then checks that
    `forward`, `forward_features`, and `forward_head(pre_logits=True)` all
    reproduce the stored outputs within tolerance, both for the saved
    pre-processed inputs and for the jpg re-processed through the model's
    own transform.

    Args:
        model_name: a pretrained `test_*` model id from `list_pretrained`.
        batch_size: parametrized for consistency with sibling tests; the
            stored test vectors are batch-size 1, so this is unused.
    """
    from PIL import Image
    from huggingface_hub import snapshot_download
    import tempfile
    # Explicitly import the submodule: `import safetensors` alone does not
    # make `safetensors.torch` available.
    import safetensors.torch

    model = create_model(model_name, pretrained=True)
    model.eval()
    # Build the model's own eval transform so the jpg path matches how the
    # stored owl input tensor was produced.
    pp = timm.data.create_transform(**timm.data.resolve_data_config(model=model))

    with tempfile.TemporaryDirectory() as temp_dir:
        # Only pull the `test/` folder (tensors + jpg), not the weights.
        snapshot_download(
            repo_id='timm/' + model_name, repo_type='model', local_dir=temp_dir, allow_patterns='test/*'
        )
        rand_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'rand_tensors.safetensors'))
        owl_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'owl_tensors.safetensors'))
        test_owl = Image.open(os.path.join(temp_dir, 'test', 'test_owl.jpg'))

        with torch.no_grad():
            rand_output = model(rand_tensors['input'])
            rand_features = model.forward_features(rand_tensors['input'])
            rand_pre_logits = model.forward_head(rand_features, pre_logits=True)
            assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4)
            assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4)
            assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

            def _test_owl(owl_input):
                # Compare all three forward paths against the stored owl vectors.
                # `.clone()` guards against forward_head mutating the features
                # in place before the allclose comparison below.
                owl_output = model(owl_input)
                owl_features = model.forward_features(owl_input)
                owl_pre_logits = model.forward_head(owl_features.clone(), pre_logits=True)
                assert owl_output.softmax(1).argmax(1) == 24  # class index 24 == owl
                assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-4)
                assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-4)
                assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

            _test_owl(owl_tensors['input'])  # test with original pre-processed owl tensor
            _test_owl(pp(test_owl).unsqueeze(0))  # re-process from original jpg
164+
165+
121166
@pytest.mark.base
122167
@pytest.mark.timeout(timeout120)
123168
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
@@ -182,7 +227,7 @@ def test_model_backward(model_name, batch_size):
182227
)
183228

184229
@pytest.mark.cfg
185-
@pytest.mark.timeout(timeout300)
230+
@pytest.mark.timeout(timeout360)
186231
@pytest.mark.parametrize('model_name', list_models(
187232
exclude_filters=EXCLUDE_FILTERS + NON_STD_FILTERS, include_tags=True))
188233
@pytest.mark.parametrize('batch_size', [1])
@@ -260,7 +305,7 @@ def test_model_default_cfgs(model_name, batch_size):
260305

261306

262307
@pytest.mark.cfg
263-
@pytest.mark.timeout(timeout300)
308+
@pytest.mark.timeout(timeout360)
264309
@pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS, include_tags=True))
265310
@pytest.mark.parametrize('batch_size', [1])
266311
def test_model_default_cfgs_non_std(model_name, batch_size):

0 commit comments

Comments
 (0)