from flask import Flask, jsonify
from flask import request
from transformers import pipeline
import redis
import uuid
import json
from threading import Thread
from vllm import LLM, SamplingParams
import time

app = Flask(__name__)
# Let jsonify emit non-ASCII (e.g. Chinese) characters unescaped.
app.config["JSON_AS_ASCII"] = False

# FIX: decode_responses must be set on the ConnectionPool itself -- redis-py
# ignores connection kwargs passed to Redis() when an explicit pool is given,
# so the original flag was dead and replies arrived as bytes (hence the old
# manual .decode('UTF-8')). With the flag on the pool, every reply is a str.
# SECURITY NOTE(review): hard-coded credentials in source; move the password
# to an environment variable or config file.
pool = redis.ConnectionPool(
    host='localhost',
    port=63179,
    max_connections=50,
    db=11,
    password="zhicheng123*",
    decode_responses=True,
)
redis_ = redis.Redis(connection_pool=pool)

db_key_query = 'query'    # Redis list used as the inbound request queue
db_key_result = 'result'  # unused here; presumably shared with another service
batch_size = 32           # max prompts merged into one llm.generate() call

sampling_params = SamplingParams(
    temperature=0.95,
    top_p=0.7,
    presence_penalty=0.9,
    stop="",  # NOTE(review): empty stop string -- confirm this is intended
    max_tokens=2048,
)
models_path = "/home/majiahui/project/models-llm/openbuddy-llama-7b-finetune"
llm = LLM(model=models_path, tokenizer_mode="slow")


def classify(batch_size):
    """Background worker: drain up to ``batch_size`` queued requests, run one
    batched ``llm.generate()`` call, and publish each result to Redis under
    its request id. Runs forever; intended for a daemon thread."""
    while True:
        if redis_.llen(db_key_query) == 0:
            # Queue empty -- back off briefly before polling again.
            time.sleep(2)
            continue
        texts = []
        query_ids = []
        for _ in range(min(redis_.llen(db_key_query), batch_size)):
            raw = redis_.lpop(db_key_query)
            if raw is None:
                # Queue drained concurrently between llen() and lpop();
                # the original code would have crashed here.
                break
            query = json.loads(raw)  # parse once (was parsed twice before)
            query_ids.append(query['id'])
            texts.append(query['text'])
        if not texts:
            continue
        # One batched model call for the whole drained slice.
        outputs = llm.generate(texts, sampling_params)
        for id_, output in zip(query_ids, outputs):
            res = output.outputs[0].text
            # Publish the generated text where handle_query() polls for it.
            redis_.set(id_, json.dumps(res))


@app.route("/predict", methods=["POST"])
def handle_query():
    """Enqueue the posted text for the worker, block until its result is
    published, then return it as JSON.

    Request body: ``{"texts": <prompt string>}``.
    Response: ``{"code": "200", "data": <generated text>}``.
    NOTE(review): waits indefinitely if the worker never answers -- consider
    a timeout."""
    text = request.json["texts"]  # user prompt, e.g. "I love you"
    id_ = str(uuid.uuid1())       # unique ticket tying request to result
    d = {'id': id_, 'text': text}
    redis_.rpush(db_key_query, json.dumps(d))  # hand off to the worker queue
    while True:
        result = redis_.get(id_)  # poll for the worker's answer
        if result is not None:
            redis_.delete(id_)    # one-shot result: consume it
            result_text = {'code': "200", 'data': json.loads(result)}
            break
        time.sleep(1)
    return jsonify(result_text)


if __name__ == "__main__":
    # daemon=True so the process can exit when Flask stops; the worker loop
    # never returns on its own (the original non-daemon thread kept the
    # process alive forever).
    t = Thread(target=classify, args=(batch_size,), daemon=True)
    t.start()
    app.run(debug=False, host='0.0.0.0', port=18000)