import os

os.environ["CUDA_VISIBLE_DEVICES"] = "2"

import json
import time
import uuid
from threading import Thread

import flask
import redis
from flask import request
from vllm import LLM, SamplingParams

app = flask.Flask(__name__)

# Redis serves as a simple work queue between the Flask handler and the
# single background inference thread.
pool = redis.ConnectionPool(host='localhost', port=63179, max_connections=100,
                            db=5, password="zhicheng123*")
redis_ = redis.Redis(connection_pool=pool)

db_key_query = 'query'    # list key holding pending requests
db_key_result = 'result'  # reserved for result bookkeeping

sampling_params = SamplingParams(temperature=0.95, top_p=0.7,
                                 presence_penalty=1.1, max_tokens=4096)
models_path = "/home/majiahui/model-llm/openbuddy-mistral-7b-v13.1"
llm = LLM(model=models_path, tokenizer_mode="slow")


def mistral_vllm_models(texts):
    """Run one batch of prompts through vLLM, returning texts in input order."""
    outputs = llm.generate(texts, sampling_params)  # run the model
    generated_text_list = [""] * len(texts)
    # llm.generate returns outputs in the same order as the input prompts, so
    # the loop index is the correct position. Do not index with
    # output.request_id: it is a process-wide counter, not a batch offset,
    # and would raise IndexError from the second batch onward.
    for i, output in enumerate(outputs):
        generated_text_list[i] = output.outputs[0].text
    return generated_text_list


def classify():
    """Background worker: pop queued requests, run the model, store results."""
    while True:
        # blpop blocks until an item is available, avoiding a busy loop that
        # would otherwise hammer Redis with llen calls.
        _, query = redis_.blpop(db_key_query)
        data = json.loads(query.decode('UTF-8'))
        query_id = data['id']
        texts = data['texts']  # list of prompts forming one batch
        result = mistral_vllm_models(texts)  # run the model
        print(result)
        redis_.set(query_id, json.dumps(result))  # publish result under the query id


@app.route("/predict", methods=["POST"])
def handle_query():
    texts = request.json["texts"]  # list of prompt strings, e.g. ["I love you"]
    id_ = str(uuid.uuid1())        # unique id for this request
    d = {'id': id_, 'texts': texts}  # bind the texts to the query id
    redis_.rpush(db_key_query, json.dumps(d))  # enqueue for the worker thread
    # Poll for the worker's result; sleep briefly between polls.
    while True:
        result = redis_.get(id_)
        if result is not None:
            redis_.delete(id_)
            result_text = {'code': "200",
                           'result': json.loads(result.decode('UTF-8'))}
            break
        time.sleep(0.05)
    return flask.jsonify(result_text)  # return the generations


if __name__ == "__main__":
    t = Thread(target=classify)
    t.start()
    app.run(debug=False, host='0.0.0.0', port=14010)
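# ---------------------------------------------------------------------------
# Usage sketch (an illustration, not part of the service): assuming the server
# above is running and reachable at localhost:14010, a client can POST a list
# of prompts and read the generations back from the JSON response.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:14010/predict",
#       json={"texts": ["Hello, how are you?", "Summarize vLLM in one line."]},
#   )
#   print(resp.json())  # -> {"code": "200", "result": ["...", "..."]}
# ---------------------------------------------------------------------------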