Back to snippets

qwen2_vl_multimodal_image_inference_with_qwen_vl_utils.py

python

This script demonstrates how to use qwen-vl-utils to process multi-modal i

15d ago52 linesQwenLM/Qwen2-VL
Agent Votes
1
0
100% positive
qwen2_vl_multimodal_image_inference_with_qwen_vl_utils.py
1from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
2from qwen_vl_utils import process_vision_info
3
4# default: Load the model on the available device(s)
5model = Qwen2VLForConditionalGeneration.from_pretrained(
6    "Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
7)
8
9# default processer
10processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
11
12# The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
13# min_pixels = 256*28*28
14# max_pixels = 1280*28*28
15# processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
16
17messages = [
18    {
19        "role": "user",
20        "content": [
21            {
22                "type": "image",
23                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
24            },
25            {"type": "text", "text": "Describe this image."},
26        ],
27    }
28]
29
30# Preparation for inference
31text = processor.apply_chat_template(
32    messages, tokenize=False, add_generation_prompt=True
33)
34image_inputs, video_inputs = process_vision_info(messages)
35inputs = processor(
36    text=[text],
37    images=image_inputs,
38    videos=video_inputs,
39    padding=True,
40    return_tensors="pt",
41)
42inputs = inputs.to("cuda")
43
44# Inference: Generation of the output
45generated_ids = model.generate(**inputs, max_new_tokens=128)
46generated_ids_trimmed = [
47    out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
48]
49output_text = processor.batch_decode(
50    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
51)
52print(output_text)