 # yapf: enable
 from vllm.transformers_utils.processor import cached_get_processor
 from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
-from vllm.utils import deprecate_kwargs, random_uuid
+from vllm.utils import random_uuid

 logger = init_logger(__name__)

@@ -383,17 +383,12 @@ def resolve_mistral_chat_template(
     return None


-@deprecate_kwargs(
-    "trust_remote_code",
-    additional_message="Please use `model_config.trust_remote_code` instead.",
-)
 def resolve_hf_chat_template(
     tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
     chat_template: Optional[str],
     tools: Optional[list[dict[str, Any]]],
     *,
     model_config: ModelConfig,
-    trust_remote_code: Optional[bool] = None,
 ) -> Optional[str]:
     # 1st priority: The given chat template
     if chat_template is not None:
@@ -488,18 +483,13 @@ def _log_chat_template_content_format(
     )


-@deprecate_kwargs(
-    "trust_remote_code",
-    additional_message="Please use `model_config.trust_remote_code` instead.",
-)
 def resolve_chat_template_content_format(
     chat_template: Optional[str],
     tools: Optional[list[dict[str, Any]]],
     given_format: ChatTemplateContentFormatOption,
     tokenizer: AnyTokenizer,
     *,
     model_config: ModelConfig,
-    trust_remote_code: Optional[bool] = None,
 ) -> _ChatTemplateContentFormat:
     if given_format != "auto":
         return given_format
@@ -568,17 +558,9 @@ def add(self, modality: ModalityStr, item: _T) -> Optional[str]:

         input_modality = modality.replace("_embeds", "")

-        if mm_registry.has_processor(model_config):
-            mm_processor = mm_registry.create_processor(model_config)
-            allowed_counts = mm_processor.info.get_allowed_mm_limits()
-            allowed_count = allowed_counts.get(input_modality, 0)
-        else:
-            mm_config = model_config.multimodal_config
-            if mm_config is None:
-                msg = "This model does not support multi-modal inputs"
-                raise ValueError(msg)
-
-            allowed_count = mm_config.get_limit_per_prompt(input_modality)
+        mm_processor = mm_registry.create_processor(model_config)
+        allowed_counts = mm_processor.info.get_allowed_mm_limits()
+        allowed_count = allowed_counts.get(input_modality, 0)

         current_count = len(self._items_by_modality[modality]) + 1
         if current_count > allowed_count:
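
This hunk makes the processor-backed lookup unconditional: the fallback through `model_config.multimodal_config.get_limit_per_prompt` is gone, so per-modality limits always come from the registered processor, and modalities the processor does not report default to a limit of 0. A minimal sketch of the resulting check, assuming `mm_registry` and `model_config` are in scope as in the diff (the helper name is hypothetical):

```python
def _allowed_count(mm_registry, model_config, modality: str) -> int:
    # "image_embeds" and "image" share one limit, so strip the suffix first.
    input_modality = modality.replace("_embeds", "")
    # Processor-backed path only; the config-based fallback no longer exists.
    mm_processor = mm_registry.create_processor(model_config)
    allowed_counts = mm_processor.info.get_allowed_mm_limits()
    # Unreported modalities default to 0, i.e. the item is rejected.
    return allowed_counts.get(input_modality, 0)
```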
@@ -1285,10 +1267,6 @@ def parse_chat_messages_futures(
     return conversation, mm_tracker.all_mm_data()


-@deprecate_kwargs(
-    "trust_remote_code",
-    additional_message="Please use `model_config.trust_remote_code` instead.",
-)
 def apply_hf_chat_template(
     tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
     conversation: list[ConversationMessage],
@@ -1297,8 +1275,6 @@ def apply_hf_chat_template(
     *,
     model_config: ModelConfig,
     tokenize: bool = False,  # Different from HF's default
-    # Deprecated, explicitly capture here so it doesn't slit into kwargs.
-    trust_remote_code: Optional[bool] = None,
     **kwargs: Any,
 ) -> str:
     hf_chat_template = resolve_hf_chat_template(
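
With the decorator and the keyword removed from all three entry points, `trust_remote_code` now travels inside `model_config` rather than as a separate argument. A minimal before/after sketch of a call site, assuming `tokenizer` and `model_config` (a `vllm.config.ModelConfig`) already exist:

```python
# Before: accepted but deprecated, with a warning pointing at model_config.
# resolve_hf_chat_template(tokenizer, chat_template=None, tools=None,
#                          model_config=model_config, trust_remote_code=True)

# After: the flag is read from model_config.trust_remote_code instead.
chat_template = resolve_hf_chat_template(
    tokenizer,
    chat_template=None,  # fall back to the model's bundled template
    tools=None,
    model_config=model_config,
)
```

Passing `trust_remote_code=...` to either resolver now raises a `TypeError`, since those signatures take no `**kwargs`. For `apply_hf_chat_template` it would instead land in `**kwargs` and be forwarded to the template renderer, which is why the deprecated keyword had to be captured explicitly while the deprecation was in effect.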