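# Demo: multi-image, multi-turn chat with Qwen-VL-Chat via Hugging Face
# transformers. Model and image paths below are local to the author's machine.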
import os
# Restrict this process to GPU 3; must be set before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
# Set a manual seed if you want reproducible results.
# torch.manual_seed(1234)
model_path = "/home/majiahui/project/models-llm/Qwen-VL-Chat"
# Qwen-VL ships custom tokenizer and model code, so trust_remote_code=True is required.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="cuda", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained(model_path, trust_remote_code=True)
# First turn: two product images plus a question. The prompt asks (in Chinese):
# "What are the standout highlights of the shoes in the images? Please list them."
query = tokenizer.from_list_format([
    {'image': '/home/majiahui/project/baichuan-7B-main/picture/7.png'},
    {'image': '/home/majiahui/project/baichuan-7B-main/picture/9.png'},
    {'text': '图中鞋子有什么突出亮点,请列举出来'},
])
# history=None starts a fresh conversation; the returned history carries the
# query/response pairs forward into later turns.
response, history = model.chat(tokenizer, query=query, history=None)
print(response)
print(history)
# Second turn: text only; the images from the first turn are recalled through
# `history`. The prompt asks (in Chinese): "Based on the strengths and selling
# points above, combined with the images, write marketing copy that will help
# the product sell better."
query = tokenizer.from_list_format([
    # {'image': '/home/majiahui/project/baichuan-7B-main/picture/7.png'},
    # {'image': '/home/majiahui/project/baichuan-7B-main/picture/9.png'},
    {'text': '根据上面说的优点和卖点结合图片,写一段营销文本让商品卖的更好'},
])
# Pass the accumulated history so the model keeps the first turn's context.
response, history = model.chat(tokenizer, query=query, history=history)
print(response)
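
# Optional sketch (not in the original script): Qwen-VL can emit grounding
# boxes as <box> tags, and the chat tokenizer can render them onto the most
# recent image, per the Qwen-VL README. The prompts above don't ask for boxes,
# so this would print "no box" here; it is shown only as a usage example.
image = tokenizer.draw_bbox_on_latest_picture(response, history)
if image:
    image.save('output_chat.jpg')
else:
    print("no box")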