File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/infer_codes/miniCpmV_infer_vllm.py", line 37, in _build_vllm_and_tokenzier
[rank0]: llm = LLM(model=MINICPMV_PATH,
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/entrypoints/llm.py", line 155, in __init__
[rank0]: self.llm_engine = LLMEngine.from_engine_args(
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/engine/llm_engine.py", line 447, in from_engine_args
[rank0]: engine = cls(
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/engine/llm_engine.py", line 265, in __init__
[rank0]: self._initialize_kv_caches()
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/engine/llm_engine.py", line 364, in _initialize_kv_caches
[rank0]: self.model_executor.determine_num_available_blocks())
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/executor/gpu_executor.py", line 94, in determine_num_available_blocks
[rank0]: return self.driver_worker.determine_num_available_blocks()
[rank0]: File "/mllm/yangjirui03/envs/miniCpmV_vllm2/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
[rank0]: return func(*args, **kwargs)
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/worker/worker.py", line 179, in determine_num_available_blocks
[rank0]: self.model_runner.profile_run()
[rank0]: File "/mllm/yangjirui03/envs/miniCpmV_vllm2/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
[rank0]: return func(*args, **kwargs)
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/worker/model_runner.py", line 927, in profile_run
[rank0]: model_input = self.prepare_model_input(
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/worker/model_runner.py", line 1265, in prepare_model_input
[rank0]: model_input = self._prepare_model_input_tensors(
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/worker/model_runner.py", line 839, in _prepare_model_input_tensors
[rank0]: builder.add_seq_group(seq_group_metadata)
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/worker/model_runner.py", line 493, in add_seq_group
[rank0]: per_seq_group_fn(inter_data, seq_group_metadata)
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/worker/model_runner.py", line 468, in _compute_multi_modal_input
[rank0]: mm_kwargs = self.multi_modal_input_mapper(mm_data)
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/multimodal/registry.py", line 93, in map_input
[rank0]: .map_input(model_config, data_value)
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/multimodal/base.py", line 228, in map_input
[rank0]: return mapper(InputContext(model_config), data)
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/multimodal/image.py", line 117, in _default_input_mapper
[rank0]: image_processor = self._get_hf_image_processor(model_config)
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/multimodal/image.py", line 109, in _get_hf_image_processor
[rank0]: return cached_get_image_processor(
[rank0]: File "/mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/offical_vllm_for_miniCPM/vllm/vllm/transformers_utils/image_processor.py", line 17, in get_image_processor
[rank0]: processor = AutoImageProcessor.from_pretrained(
[rank0]: File "/mllm/yangjirui03/envs/miniCpmV_vllm2/lib/python3.10/site-packages/transformers/models/auto/image_processing_auto.py", line 410, in from_pretrained
[rank0]: config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
[rank0]: File "/mllm/yangjirui03/envs/miniCpmV_vllm2/lib/python3.10/site-packages/transformers/image_processing_base.py", line 335, in get_image_processor_dict
[rank0]: resolved_image_processor_file = cached_file(
[rank0]: File "/mllm/yangjirui03/envs/miniCpmV_vllm2/lib/python3.10/site-packages/transformers/utils/hub.py", line 373, in cached_file
[rank0]: raise EnvironmentError(
[rank0]: OSError: /mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/MiniCPM-V-2 does not appear to have a file named preprocessor_config.json. Checkout 'https://huggingface.co//mllm/yangjirui03/workspace/ThirdPartyMLLM/MiniCPM_V/MiniCPM-V-2/tree/None' for available files.
Your current environment
🐛 Describe the bug
vLLM can successfully load "MiniCPM-Llama3-V-2_5", but it throws an error when loading "MiniCPM-V-2". The code is as follows.
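The exact snippet from the report is not reproduced here; based on the traceback (`_build_vllm_and_tokenzier` calling `LLM(model=MINICPMV_PATH, ...)`), a minimal sketch of the failing load might look like the following, where the local path and engine options are assumptions rather than the reporter's actual settings:

```python
from vllm import LLM

# Placeholder for the local MiniCPM-V-2 checkout used in the report.
MINICPMV_PATH = "/path/to/MiniCPM-V-2"

# trust_remote_code is needed because MiniCPM-V ships custom modeling code;
# the remaining arguments are illustrative defaults, not the reporter's values.
llm = LLM(
    model=MINICPMV_PATH,
    trust_remote_code=True,
    gpu_memory_utilization=0.9,
    max_model_len=2048,
)
```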
The detailed logs are in the traceback shown above.
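The innermost frames of that traceback are plain transformers calls, so the failure can presumably be reproduced without vLLM. A hedged sketch (the path is a placeholder, not the reporter's directory) that should raise the same OSError whenever preprocessor_config.json is missing from the model directory:

```python
from transformers import AutoImageProcessor

# Placeholder for the local MiniCPM-V-2 directory from the report.
MODEL_DIR = "/path/to/MiniCPM-V-2"

# vLLM's multimodal input mapper ultimately calls AutoImageProcessor.from_pretrained;
# if the directory contains no preprocessor_config.json, this raises the same
# "does not appear to have a file named preprocessor_config.json" OSError.
image_processor = AutoImageProcessor.from_pretrained(MODEL_DIR, trust_remote_code=True)
```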