|
26 | 26 | has_fx_feature_extraction = False
|
27 | 27 |
|
28 | 28 | import timm
|
29 |
| -from timm import list_models, create_model, set_scriptable, get_pretrained_cfg_value |
| 29 | +from timm import list_models, list_pretrained, create_model, set_scriptable, get_pretrained_cfg_value |
30 | 30 | from timm.layers import Format, get_spatial_dim, get_channel_dim
|
31 | 31 | from timm.models import get_notrace_modules, get_notrace_functions
|
32 | 32 |
|
|
39 | 39 | torch_device = os.environ.get('TORCH_DEVICE', 'cpu')
|
40 | 40 | timeout = os.environ.get('TIMEOUT')
|
41 | 41 | timeout120 = int(timeout) if timeout else 120
|
42 |
| -timeout300 = int(timeout) if timeout else 300 |
| 42 | +timeout240 = int(timeout) if timeout else 240 |
| 43 | +timeout360 = int(timeout) if timeout else 360 |
43 | 44 |
|
44 | 45 | if hasattr(torch._C, '_jit_set_profiling_executor'):
|
45 | 46 | # legacy executor is too slow to compile large models for unit tests
|
@@ -118,6 +119,50 @@ def _get_input_size(model=None, model_name='', target=None):
|
118 | 119 | return input_size
|
119 | 120 |
|
120 | 121 |
|
@pytest.mark.base
@pytest.mark.timeout(timeout240)
@pytest.mark.parametrize('model_name', list_pretrained('test_*'))
@pytest.mark.parametrize('batch_size', [1])
def test_model_inference(model_name, batch_size):
    """Run a single forward pass with each `test_*` pretrained model and check outputs.

    Downloads the model's golden test artifacts (random-input tensors, a
    preprocessed owl image tensor, and the original owl JPEG) from the HF Hub,
    then verifies that forward(), forward_features(), and forward_head(pre_logits=True)
    reproduce the stored reference outputs within tolerance, and that the owl
    image is classified as class 24 (owl).

    Args:
        model_name: Name of a pretrained `test_*` model on the timm Hub org.
        batch_size: Unused here (always 1); present for parametrize symmetry
            with the other model tests.
    """
    from PIL import Image
    from huggingface_hub import snapshot_download
    import tempfile
    # Explicit submodule import: `import safetensors` alone does not expose
    # the `safetensors.torch` attribute used below.
    import safetensors.torch

    model = create_model(model_name, pretrained=True)
    model.eval()
    # Build the model's default eval-time preprocessing pipeline so the raw
    # JPEG can be re-processed and compared against the stored input tensor path.
    pp = timm.data.create_transform(**timm.data.resolve_data_config(model=model))

    with tempfile.TemporaryDirectory() as temp_dir:
        # Only fetch the `test/` artifacts, not the full model repo.
        snapshot_download(
            repo_id='timm/' + model_name, repo_type='model', local_dir=temp_dir, allow_patterns='test/*'
        )
        rand_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'rand_tensors.safetensors'))
        owl_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'owl_tensors.safetensors'))
        test_owl = Image.open(os.path.join(temp_dir, 'test', 'test_owl.jpg'))

    with torch.no_grad():
        # Golden-tensor check on a stored random input: full forward, feature
        # extraction, and pooled pre-logits must all match saved references.
        rand_output = model(rand_tensors['input'])
        rand_features = model.forward_features(rand_tensors['input'])
        rand_pre_logits = model.forward_head(rand_features, pre_logits=True)
        assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4)
        assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4)
        assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

        def _test_owl(owl_input):
            # Clone features before forward_head in case head pooling mutates input in place.
            owl_output = model(owl_input)
            owl_features = model.forward_features(owl_input)
            owl_pre_logits = model.forward_head(owl_features.clone(), pre_logits=True)
            assert owl_output.softmax(1).argmax(1) == 24  # owl
            assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-4)
            assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-4)
            assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

        _test_owl(owl_tensors['input'])  # test with original pp owl tensor
        _test_owl(pp(test_owl).unsqueeze(0))  # re-process from original jpg
| 165 | + |
121 | 166 | @pytest.mark.base
|
122 | 167 | @pytest.mark.timeout(timeout120)
|
123 | 168 | @pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
|
@@ -182,7 +227,7 @@ def test_model_backward(model_name, batch_size):
|
182 | 227 | )
|
183 | 228 |
|
184 | 229 | @pytest.mark.cfg
|
185 |
| -@pytest.mark.timeout(timeout300) |
| 230 | +@pytest.mark.timeout(timeout360) |
186 | 231 | @pytest.mark.parametrize('model_name', list_models(
|
187 | 232 | exclude_filters=EXCLUDE_FILTERS + NON_STD_FILTERS, include_tags=True))
|
188 | 233 | @pytest.mark.parametrize('batch_size', [1])
|
@@ -260,7 +305,7 @@ def test_model_default_cfgs(model_name, batch_size):
|
260 | 305 |
|
261 | 306 |
|
262 | 307 | @pytest.mark.cfg
|
263 |
| -@pytest.mark.timeout(timeout300) |
| 308 | +@pytest.mark.timeout(timeout360) |
264 | 309 | @pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS, include_tags=True))
|
265 | 310 | @pytest.mark.parametrize('batch_size', [1])
|
266 | 311 | def test_model_default_cfgs_non_std(model_name, batch_size):
|
|
0 commit comments