Commit 2836dd7

[Model][CI] Let more pooling models support v1 (#21747)

Signed-off-by: wang.yuqi <[email protected]>
1 parent d2aab33 · commit 2836dd7

8 files changed: +14 additions, −48 deletions

tests/models/language/pooling/test_classification.py

Lines changed: 0 additions & 8 deletions
@@ -6,14 +6,6 @@
 
 from vllm.platforms import current_platform
 
-# TODO: enable when float32 is supported by V1
-# @pytest.fixture(autouse=True)
-# def v1(run_with_both_engines):
-#     # Simple autouse wrapper to run both engines for each test
-#     # This can be promoted up to conftest.py to run for every
-#     # test in a package
-#     pass
-
 
 @pytest.mark.parametrize(
     "model",

tests/models/language/pooling/test_gte.py

Lines changed: 4 additions & 14 deletions
@@ -56,17 +56,10 @@
                    enable_test=False),
 ]
 
-V1FlashAttentionImpNotSupported = [
-    "Alibaba-NLP/gte-Qwen2-1.5B-instruct", "Alibaba-NLP/gte-modernbert-base"
-]
-
 
 @pytest.mark.parametrize("model_info", MODELS)
-def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo,
-                           monkeypatch) -> None:
-    if model_info.name in V1FlashAttentionImpNotSupported:
-        monkeypatch.setenv("VLLM_USE_V1", "0")
-
+def test_embed_models_mteb(hf_runner, vllm_runner,
+                           model_info: EmbedModelInfo) -> None:
     vllm_extra_kwargs: dict[str, Any] = {}
     if model_info.architecture == "GteNewModel":
         vllm_extra_kwargs["hf_overrides"] = {"architectures": ["GteNewModel"]}
@@ -77,11 +70,8 @@ def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo,
 
 @pytest.mark.parametrize("model_info", MODELS)
 def test_embed_models_correctness(hf_runner, vllm_runner,
-                                  model_info: EmbedModelInfo, example_prompts,
-                                  monkeypatch) -> None:
-    if model_info.name in V1FlashAttentionImpNotSupported:
-        monkeypatch.setenv("VLLM_USE_V1", "0")
-
+                                  model_info: EmbedModelInfo,
+                                  example_prompts) -> None:
     vllm_extra_kwargs: dict[str, Any] = {}
     if model_info.architecture == "GteNewModel":
         vllm_extra_kwargs["hf_overrides"] = {"architectures": ["GteNewModel"]}

tests/models/language/pooling/test_jina.py

Lines changed: 0 additions & 13 deletions
@@ -4,7 +4,6 @@
 
 import pytest
 
-import vllm.envs as envs
 from vllm import PoolingParams
 
 from ...utils import EmbedModelInfo, RerankModelInfo
@@ -24,14 +23,6 @@
 ]
 
 
-@pytest.fixture(autouse=True)
-def v1(run_with_both_engines):
-    # Simple autouse wrapper to run both engines for each test
-    # This can be promoted up to conftest.py to run for every
-    # test in a package
-    pass
-
-
 @pytest.mark.parametrize("model_info", EMBEDDING_MODELS)
 def test_embed_models_mteb(hf_runner, vllm_runner,
                            model_info: EmbedModelInfo) -> None:
@@ -63,10 +54,6 @@ def hf_model_callback(model):
 @pytest.mark.parametrize("model_info", RERANK_MODELS)
 def test_rerank_models_mteb(hf_runner, vllm_runner,
                             model_info: RerankModelInfo) -> None:
-    if (model_info.architecture == "XLMRobertaForSequenceClassification"
-            and envs.VLLM_USE_V1):
-        pytest.skip("Not supported yet")
-
     mteb_test_rerank_models(hf_runner, vllm_runner, model_info)
 
 

tests/models/language/pooling/test_qwen3_reranker.py

Lines changed: 0 additions & 6 deletions
@@ -83,9 +83,6 @@ def test_rerank_models_mteb(vllm_runner, model_info: RerankModelInfo) -> None:
         }
     }
 
-    if model_info.name == "Qwen/Qwen3-Reranker-4B":
-        vllm_extra_kwargs["max_num_seqs"] = 1
-
     mteb_test_rerank_models(Qwen3RerankerHfRunner, vllm_runner, model_info,
                             vllm_extra_kwargs)
 
@@ -106,9 +103,6 @@ def test_rerank_models_mteb_tp(vllm_runner,
         "tensor_parallel_size": 2,
     }
 
-    if model_info.name == "Qwen/Qwen3-Reranker-4B":
-        vllm_extra_kwargs["max_num_seqs"] = 1
-
     mteb_test_rerank_models(Qwen3RerankerHfRunner,
                             vllm_runner,
                             model_info,

vllm/config.py

Lines changed: 8 additions & 0 deletions
@@ -776,6 +776,9 @@ def _task_to_convert(task: TaskOption) -> ConvertType:
             raise ValueError(
                 "`override_neuron_config` is only supported on Neuron.")
 
+        # Avoid running try_verify_and_update_config multiple times
+        self.config_updated = False
+
         self._verify_quantization()
         self._verify_cuda_graph()
         self._verify_bnb_config()
@@ -4914,6 +4917,11 @@ def try_verify_and_update_config(self):
         if self.model_config is None:
             return
 
+        # Avoid running try_verify_and_update_config multiple times
+        if getattr(self.model_config, "config_updated", False):
+            return
+        self.model_config.config_updated = True
+
         architecture = self.model_config.architecture
         if architecture is None:
             return
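The two hunks above add a simple idempotence guard: `config_updated` is initialized to False when the model config is created, and the first call to `try_verify_and_update_config` flips it so that later calls return early instead of re-running the per-architecture verification and updates. A stripped-down sketch of just the guard, with everything unrelated omitted:

class ModelConfig:
    def __init__(self):
        # Avoid running try_verify_and_update_config multiple times.
        self.config_updated = False


class VllmConfig:
    def __init__(self, model_config):
        self.model_config = model_config

    def try_verify_and_update_config(self):
        if self.model_config is None:
            return
        # Guard: only the first call performs the per-model updates.
        if getattr(self.model_config, "config_updated", False):
            return
        self.model_config.config_updated = True
        ...  # architecture-specific verification/updates happen here


cfg = VllmConfig(ModelConfig())
cfg.try_verify_and_update_config()  # runs the updates
cfg.try_verify_and_update_config()  # returns early, nothing re-run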

vllm/model_executor/models/bert_with_rope.py

Lines changed: 1 addition & 4 deletions
@@ -8,7 +8,6 @@
 from transformers import PretrainedConfig
 
 from vllm.attention import Attention, AttentionType
-from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, VllmConfig
 from vllm.distributed import (divide, get_tensor_model_parallel_rank,
                               get_tensor_model_parallel_world_size,
@@ -26,7 +25,6 @@
 from vllm.model_executor.layers.vocab_parallel_embedding import (
     VocabParallelEmbedding)
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
-from vllm.model_executor.models import SupportsV0Only
 from vllm.model_executor.models.interfaces import SupportsQuant
 from vllm.model_executor.models.utils import WeightsMapper
 from vllm.model_executor.utils import set_weight_attrs
@@ -360,7 +358,6 @@ def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor):
         return hidden_states
 
 
-@support_torch_compile
 class BertWithRopeEncoder(nn.Module):
 
     def __init__(self,
@@ -394,7 +391,7 @@ def forward(
         return hidden_states
 
 
-class BertWithRope(nn.Module, SupportsV0Only, SupportsQuant):
+class BertWithRope(nn.Module, SupportsQuant):
     hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""})
 
     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
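Dropping `SupportsV0Only` from `BertWithRope`'s base classes is what makes the model eligible for the V1 engine: per the removed import, `SupportsV0Only` is a marker interface from vllm.model_executor.models, and models that declare it are treated as V0-only. The sketch below illustrates the general marker-interface pattern; the `is_v0_only` helper is hypothetical and not vLLM's actual dispatch logic:

class SupportsV0Only:
    """Marker interface: inheriting models are restricted to the V0 engine."""


def is_v0_only(model_cls: type) -> bool:
    # Hypothetical helper showing how a marker base class is typically
    # consulted; vLLM's real engine-selection code is not part of this diff.
    return issubclass(model_cls, SupportsV0Only)


class BertWithRopeBefore(SupportsV0Only):  # pre-commit: V0 only
    ...


class BertWithRopeAfter:  # post-commit: no restriction, V1 eligible
    ...


assert is_v0_only(BertWithRopeBefore)
assert not is_v0_only(BertWithRopeAfter)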

vllm/model_executor/models/config.py

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ def verify_and_update_config(vllm_config: "VllmConfig") -> None:
         config.num_hidden_layers = config.n_layer
 
         head_dim = config.hidden_size // config.num_attention_heads
-        rotary_emb_dim = head_dim * config.rotary_emb_fraction
+        rotary_emb_dim = int(head_dim * config.rotary_emb_fraction)
         max_trained_positions = getattr(config, "max_trained_positions", 2048)
         config.rotary_kwargs = {
             "head_size": head_dim,

vllm/model_executor/models/modernbert.py

Lines changed: 0 additions & 2 deletions
@@ -8,7 +8,6 @@
 from transformers import ModernBertConfig
 
 from vllm.attention import Attention, AttentionType
-from vllm.compilation.decorators import support_torch_compile
 from vllm.config import VllmConfig
 from vllm.distributed import get_tensor_model_parallel_world_size
 from vllm.model_executor.layers.linear import (QKVParallelLinear,
@@ -200,7 +199,6 @@ def forward(
         return hidden_states
 
 
-@support_torch_compile
 class ModernBertModel(nn.Module):
     hf_to_vllm_mapper = WeightsMapper(
         orig_to_new_prefix={"layers.": "encoder_layer.layers."})
