import os

os.environ["CUDA_VISIBLE_DEVICES"] = "3"

from transformers import pipeline
import redis
import uuid
import json
from threading import Thread
from vllm import LLM, SamplingParams
import time
import threading
import concurrent.futures
import requests
import socket

pool = redis.ConnectionPool(host='localhost', port=63179, max_connections=50, db=11, password="zhicheng123*")
redis_ = redis.Redis(connection_pool=pool, decode_responses=True)

# Redis keys for the request queue and result storage
db_key_query = 'query'
db_key_query_articles_directory = 'query_articles_directory'
db_key_result = 'result'
batch_size = 512

# vLLM sampling settings and model
sampling_params = SamplingParams(temperature=0.95, top_p=0.7, presence_penalty=0.9, stop="</s>", max_tokens=4096)
models_path = "/home/majiahui/project/models-llm/openbuddy-llama-7b-finetune"
llm = LLM(model=models_path, tokenizer_mode="slow")

def classify(batch_size):  # run the model, capping each batch at batch_size
    while True:
        texts = []
        query_ids = []
        if redis_.llen(db_key_query) == 0:  # queue is empty, wait and poll again
            time.sleep(2)
            continue
        for i in range(min(redis_.llen(db_key_query), batch_size)):
            query = json.loads(redis_.lpop(db_key_query).decode('UTF-8'))  # pop one request from the queue
            query_ids.append(query['id'])
            texts.append(query['text'])  # collect the texts into a batch

        outputs = llm.generate(texts, sampling_params)  # run the model on the batch

        generated_text_list = [""] * len(texts)
        print("outputs", len(outputs))
        for i, output in enumerate(outputs):
            # outputs come back in the same order as the prompts, so the loop index
            # maps each generation to its position in the batch (request_id keeps
            # growing across generate() calls, so it cannot be used as a list index)
            generated_text_list[i] = output.outputs[0].text

        for (id_, output) in zip(query_ids, generated_text_list):
            redis_.set(id_, json.dumps(output))  # write the result back under the request id


if __name__ == '__main__':
    t = Thread(target=classify, args=(batch_size,))
    t.start()
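

# ---------------------------------------------------------------------------
# Hypothetical client-side sketch (not part of this worker): it shows how a
# producer could enqueue a request in the format this loop expects
# ({"id": ..., "text": ...} pushed onto the 'query' list) and then poll Redis
# for the result stored under that id. The name `submit_query`, the timeout,
# and the 1-second poll interval are illustrative assumptions, not an
# existing API in this project.
#
# def submit_query(text, timeout=300):
#     id_ = str(uuid.uuid4())
#     redis_.rpush(db_key_query, json.dumps({"id": id_, "text": text}))
#     deadline = time.time() + timeout
#     while time.time() < deadline:
#         res = redis_.get(id_)
#         if res is not None:
#             return json.loads(res)
#         time.sleep(1)
#     return None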