Deploy with vLLM
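The script below is a quick concurrency smoke test for the deployed service: it fires 1000 identical requests at the model's /predict endpoint from a thread pool and prints the collected responses.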

import concurrent.futures

import requests


def dialog_line_parse(url, text):
    """
    Send one request to the model server and return the parsed result.
    :param url: model endpoint URL
    :param text: JSON payload sent to the model
    :return: parsed JSON response, or [] on failure
    """
    response = requests.post(
        url,
        json=text,
        timeout=1000,  # generous timeout in seconds, for slow generations
    )
    if response.status_code == 200:
        return response.json()
    else:
        # logger.error(
        #     "【{}】 Failed to get a proper response from remote "
        #     "server. Status Code: {}. Response: {}"
        #     "".format(url, response.status_code, response.text)
        # )
        print("【{}】 Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(url, response.status_code, response.text))
        print(text)
        return []


nums = 1000
url = "http://192.168.31.74:18001/predict"
input_data = []
for i in range(nums):
    input_data.append([url, {"texts": "User:你好\nAssistant:"}])

with concurrent.futures.ThreadPoolExecutor() as executor:
    # Submit each request to the thread pool and keep the Future objects
    futures = [executor.submit(dialog_line_parse, i[0], i[1]) for i in input_data]
    # Collect return values in completion order (not submission order)
    results = [future.result() for future in concurrent.futures.as_completed(futures)]
print(results)
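The client above presupposes a server listening on /predict, but the server code is not shown here. The following is only a minimal sketch of what such a service could look like, assuming a FastAPI wrapper around vLLM's offline LLM engine. The /predict route, port 18001, and the "texts" field mirror the client above; the model path, sampling parameters, and response schema are hypothetical placeholders.

# Minimal sketch of the assumed server side: FastAPI wrapping vLLM.
# The /predict route, port 18001, and {"texts": ...} schema mirror the
# client script; the model path below is a hypothetical placeholder.
from fastapi import FastAPI
from pydantic import BaseModel
from vllm import LLM, SamplingParams
import uvicorn

app = FastAPI()
llm = LLM(model="/path/to/your/model")  # hypothetical model path
sampling_params = SamplingParams(temperature=0.7, max_tokens=256)


class PredictRequest(BaseModel):
    texts: str


@app.post("/predict")
def predict(req: PredictRequest):
    # generate() takes a list of prompts and returns one RequestOutput
    # per prompt; we send a single prompt and unwrap its first completion.
    outputs = llm.generate([req.texts], sampling_params)
    return {"text": outputs[0].outputs[0].text}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=18001)

Note that wrapping the synchronous LLM.generate in a request handler serializes generation, so the 1000 concurrent client requests would queue up one by one. For production serving, vLLM also ships an OpenAI-compatible HTTP server (e.g. python -m vllm.entrypoints.openai.api_server) that batches concurrent requests through its async engine; the sketch above only mirrors this repo's custom /predict contract.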