Qwen 2.5 VL: Multiple Inference Approaches
flyfish
Single-image inference
from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

model_path = "/media/model/Qwen/Qwen25-VL-7B-Instruct/"
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_path)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "output_frames/frame_0000.jpg"},
            {"type": "text", "text": "Describe the image."},
        ],
    }
]

# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

# Inference: generate, then strip the prompt tokens before decoding
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
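All of the examples in this post read frame images such as output_frames/frame_0000.jpg. The original does not show how those frames were produced; a minimal sketch, assuming OpenCV (cv2) is installed and a local test.mp4 exists (both are assumptions, not part of the original), that saves roughly one frame per second into output_frames/:

import os
import cv2

os.makedirs("output_frames", exist_ok=True)
cap = cv2.VideoCapture("test.mp4")            # hypothetical input video
fps = cap.get(cv2.CAP_PROP_FPS) or 25.0       # fall back if FPS metadata is missing
step = max(1, int(round(fps)))                # keep roughly one frame per second

idx = saved = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    if idx % step == 0:
        cv2.imwrite(f"output_frames/frame_{saved:04d}.jpg", frame)
        saved += 1
    idx += 1
cap.release()
print(f"saved {saved} frames")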
Multi-image inference
from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

# Default: load the model onto whatever devices are available
# model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
#     "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
# )

# Use a local model path
model_path = "/media/model/Qwen/Qwen25-VL-7B-Instruct/"

# Enabling flash_attention_2 is recommended for better speed and lower memory use,
# especially in multi-image and video scenarios.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)

# Default processor
processor = AutoProcessor.from_pretrained(model_path)

# By default the model accepts 4-16384 visual tokens per image.
# Set min_pixels and max_pixels as needed, e.g. a token range of 256-1280,
# to balance quality and cost.
# min_pixels = 256 * 28 * 28
# max_pixels = 1280 * 28 * 28
# processor = AutoProcessor.from_pretrained(
#     "Qwen/Qwen2.5-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels
# )

# A message containing multiple images and a text query
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "output_frames/frame_0000.jpg"},
            {"type": "image", "image": "output_frames/frame_0001.jpg"},
            {"type": "text", "text": "Identify the similarities between these images."},
        ],
    }
]

# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

# Inference
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
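The images above are given as local paths. According to the usage notes that ship with qwen_vl_utils, the image field can also take a file:// URI, an HTTP(S) URL, or a base64 data URI. The sketch below only illustrates the message format; the URL and file:// values are placeholders, and support for each form should be verified against the installed qwen_vl_utils version:

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "output_frames/frame_0000.jpg"},    # local path
            {"type": "image", "image": "file:///path/to/frame_0001.jpg"},  # file:// URI (placeholder)
            {"type": "image", "image": "https://example.com/demo.jpg"},    # URL (placeholder)
            # {"type": "image", "image": "data:image;base64,/9j/..."},     # base64 data URI
            {"type": "text", "text": "Compare these images."},
        ],
    }
]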
Video inference 1 (frames as an image list)
from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

model_path = "/media/model/Qwen/Qwen25-VL-7B-Instruct/"
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_path)

# A video given as a list of pre-extracted frame images
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "video",
                "video": [
                    "output_frames/frame_0000.jpg",
                    "output_frames/frame_0001.jpg",
                    "output_frames/frame_0002.jpg",
                    "output_frames/frame_0003.jpg",
                ],
            },
            {"type": "text", "text": "Describe this video."},
        ],
    }
]

text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
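Listing frame files by hand gets tedious once there are more than a few. A small sketch that builds the same video frame list from the output_frames/ directory, assuming the frames follow the naming used above:

from glob import glob

frame_paths = sorted(glob("output_frames/frame_*.jpg"))
messages = [
    {
        "role": "user",
        "content": [
            {"type": "video", "video": frame_paths},
            {"type": "text", "text": "Describe this video."},
        ],
    }
]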
Video inference 2 (video file input)
from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

model_path = "/media/model/Qwen/Qwen25-VL-7B-Instruct/"
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_path)

# Simplest form: pass the video file directly
# messages = [
#     {
#         "role": "user",
#         "content": [
#             {
#                 "type": "video",
#                 "video": "test.mp4",
#             },
#             {"type": "text", "text": "Describe this video."},
#         ],
#     }
# ]

# With explicit sampling controls: cap the per-frame resolution and sample at 1 fps
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "video",
                "video": "test.mp4",
                "max_pixels": 360 * 420,
                "fps": 1.0,
            },
            {"type": "text", "text": "Describe this video."},
        ],
    }
]

text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

generated_ids = model.generate(**inputs, max_new_tokens=8192)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
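max_pixels and fps control how large each sampled frame is and how many frames are sampled, which directly determines the number of visual tokens and therefore VRAM use and latency. A sketch of a cheaper configuration using the same two keys; the concrete values are illustrative choices, not from the original:

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "video",
                "video": "test.mp4",
                "max_pixels": 256 * 28 * 28,  # cap per-frame resolution
                "fps": 0.5,                   # sample one frame every two seconds
            },
            {"type": "text", "text": "Summarize this video briefly."},
        ],
    }
]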
Batch inference
from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

model_path = "/media/model/Qwen/Qwen25-VL-7B-Instruct/"
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_path)
# Left padding so that generation continues from the last real token of each prompt
processor.tokenizer.padding_side = 'left'

# Sample messages for batch inference
messages1 = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "output_frames/frame_0000.jpg"},
            {"type": "image", "image": "output_frames/frame_0001.jpg"},
            {"type": "text", "text": "What elements do these images have in common?"},
        ],
    }
]
messages2 = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who are you?"},
]

# Combine messages for batch processing
messages = [messages1, messages2]

# Preparation for batch inference
texts = [
    processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True)
    for msg in messages
]
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=texts,
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

# Batch inference
generated_ids = model.generate(**inputs, max_new_tokens=8192)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_texts = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_texts)
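Setting padding_side = 'left' matters for batching: prompts of different lengths are padded to a common length, and with left padding the pad tokens sit in front of each prompt, so generation continues from the last real token. The decoded output_texts come back in the same order as the input messages and can be paired up directly, as in this short usage sketch:

for i, reply in enumerate(output_texts):
    print(f"--- conversation {i} ---")
    print(reply)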
72B
from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

# model_path = "/media/model/Qwen/Qwen25-VL-7B-Instruct/"
model_path = "/media//Qwen/Qwen25-VL-72B-Instruct-AWQ/"

# 7B
# model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
#     model_path,
#     torch_dtype=torch.bfloat16,
#     attn_implementation="flash_attention_2",
#     device_map="auto",
# )

# 72B (AWQ)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
    device_map="cuda",
)
model.config.use_cache = True
model = torch.compile(model, mode="max-autotune", fullgraph=True, dynamic=False)
processor = AutoProcessor.from_pretrained(model_path)


# Load the prompt text
def load_prompts():
    # Open and read the text file
    with open('prompt.txt', 'r', encoding='utf-8') as file:
        lines = file.readlines()
    # Merge the multi-line content into a single line
    prompt = ''.join(lines).replace('\n', ' ').strip()
    return prompt


messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "video",
                "video": [
                    "output_frames/frame_0001.jpg",
                    "output_frames/frame_0000.jpg",
                    "output_frames/frame_0002.jpg",
                    "output_frames/frame_0003.jpg",
                ],
            },
            {"type": "text", "text": load_prompts()},
        ],
    }
]

text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

generated_ids = model.generate(**inputs, max_new_tokens=8192)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
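torch.compile front-loads its cost: the first generate call triggers graph capture and autotuning and can be very slow, while later calls with similar shapes should be much faster. A minimal timing sketch, reusing model and inputs from the block above; the actual warm-up time and speedup depend heavily on the PyTorch version and hardware, so treat this as an experiment rather than a guarantee:

import time

for run in range(2):
    torch.cuda.synchronize()
    start = time.perf_counter()
    _ = model.generate(**inputs, max_new_tokens=64)
    torch.cuda.synchronize()
    print(f"run {run}: {time.perf_counter() - start:.1f} s")  # run 0 includes compile time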