
1 changed files with 272 additions and 0 deletions
@ -0,0 +1,272 @@ |
|||
#coding:utf-8 |
|||
# This is a sample Python script.

# Press Shift+F10 to run it, or replace it with your own code.
# Double-press Shift to search classes, files, tool windows, actions and settings everywhere.
|||
|
|||
|
|||
# def request_api_chatgpt(api_key, prompt): |
|||
# print(api_key) |
|||
# print(prompt) |
|||
# OPENAI_API_KEY = api_key |
|||
# url = "https://api.openai.com/v1/chat/completions" |
|||
# # url = "https://one.aiskt.com" |
|||
# headers = { |
|||
# "Content-Type": "application/json", |
|||
# "Authorization": f"Bearer {OPENAI_API_KEY}" |
|||
# } |
|||
# data = { |
|||
# "model": "gpt-4-turbo-preview", |
|||
# "messages": [ |
|||
# {"role": "user", "content": "你好"}, |
|||
# {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"}, |
|||
# # {"role": "user", "content": prompt} |
|||
# {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"} |
|||
# ], |
|||
# "top_p": 0.9, |
|||
# "temperature": 0.95 |
|||
# } |
|||
# response = requests.post(url, |
|||
# headers=headers, |
|||
# data=json.dumps(data), |
|||
# timeout=1200) |
|||
# |
|||
# return response |
|||
|
|||
import os
import re
import socket
import time

import requests
from flask import Flask, jsonify
from flask import request
|||
|
|||
# Module-level Flask application; routes below attach to this instance.
app = Flask(__name__)
# Emit non-ASCII (Chinese) characters verbatim in JSON responses instead
# of \uXXXX escapes.
# NOTE(review): JSON_AS_ASCII is deprecated in newer Flask releases —
# confirm the deployed Flask version still honours it.
app.config["JSON_AS_ASCII"] = False
|||
|
|||
|
|||
def get_host_ip():
    """Return the local IP address used for outbound traffic.

    "Connects" a UDP socket to a public address (no packet is actually
    sent for a UDP connect) and reads back the local address the OS
    picked for that route.

    :return: local IP address as a string
    :raises OSError: if the socket cannot be created or no route exists
    """
    # Create the socket *before* the try block: in the original code a
    # failure in socket() left `s` unbound and the finally clause raised
    # a NameError that masked the real error.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        return s.getsockname()[0]
    finally:
        s.close()
|||
|
|||
# Resolve the local IP once at import time; both endpoints live on the
# same host, so a single lookup suffices (the original resolved it twice,
# opening two sockets). get_host_ip() already returns a str.
_host_ip = get_host_ip()
# Backend service endpoints: job submission and result polling.
chatgpt_url_predict = "http://{}:12001/predict".format(_host_ip)
chatgpt_url_search = "http://{}:12001/search".format(_host_ip)
|||
|
|||
|
|||
def check_problems(input, output):
    """Audit a sub-heading generation result for unrequested artifacts.

    For "生成论文小标题内容" tasks only: if the output contains a formula,
    table or diagram that the prompt never asked for, append the offending
    input/output pair to ``logs/error_xiaobiaoti.log``.

    :param input: prompt text sent to the model
    :param output: text generated by the model
    :return: None (side effect only: may append to the error log)
    """
    # Only sub-heading generation tasks are audited.
    if "任务:生成论文小标题内容" not in input:
        return

    # Each rule: (permission phrase expected in the prompt,
    #             regex detecting the artifact in the output,
    #             headline written to the error log).
    rules = [
        ("数学公式用\\[\\]进行包裹",
         r'\\\[.*?\\\]',
         "多生成公式问题:\n"),
        ("表格部分开始必须用<tbStart>标识,表格部分结束必须用<tbEnd>标识,必须返回html格式的表格",
         r'<tbStart>.*?<tbEnd>',
         "多生成表格问题:\n"),
        ("图片要求在文字中插入一张图",
         r'<mermaidStart>.*?<mermaidEnd>',
         "多生成图片问题:\n"),
    ]

    error_data = ""
    for phrase, pattern, headline in rules:
        # Artifact present although the prompt never granted permission.
        if phrase not in input and re.findall(pattern, output, re.DOTALL):
            error_data += headline
            error_data += "input:\n"
            error_data += input
            error_data += "output:\n"
            error_data += output
            error_data += "\n========================================================================\n"

    if error_data:
        # Ensure the directory exists so a fresh deployment cannot fail
        # with FileNotFoundError on the first append.
        os.makedirs("logs", exist_ok=True)
        with open("logs/error_xiaobiaoti.log", "a", encoding="utf-8") as f:
            f.write(error_data)
|||
|
|||
|
|||
def return_type(input, output):
    """Classify which artifact kinds appear in a sub-heading task output.

    :param input: prompt text sent to the model
    :param output: text generated by the model
    :return: list of codes — "1" table, "2" formula, "3" diagram —
             in that fixed order; empty for non sub-heading tasks
    """
    # Non sub-heading tasks are never classified.
    if "任务:生成论文小标题内容" not in input:
        return []

    # (type code, regex detecting the artifact); order fixes the output order.
    rules = [
        ("1", r'<tbStart>.*?<tbEnd>'),
        ("2", r'\\\[.*?\\\]'),
        ("3", r'<mermaidStart>.*?<mermaidEnd>'),
    ]

    # re.search is enough for a presence test (the original built full
    # findall lists and compared against []).
    return [code for code, pattern in rules
            if re.search(pattern, output, re.DOTALL)]
|||
|
|||
|
|||
|
|||
def request_api_chatgpt(content, model, top_p, temperature):
    """Submit a generation job to the local predict service.

    :param content: fully formatted prompt text
    :param model: model identifier forwarded to the backend
    :param top_p: nucleus-sampling parameter
    :param temperature: sampling temperature
    :return: parsed JSON response on HTTP 200 (expected to carry a job id
             under ``texts.id`` — see handle_query), otherwise ``{}``
    """
    data = {
        "content": content,
        "model": model,
        "top_p": top_p,
        "temperature": temperature
    }
    response = requests.post(
        chatgpt_url_predict,
        json=data,
        timeout=100000
    )
    if response.status_code == 200:
        return response.json()
    # NOTE(review): failures are only printed and swallowed; callers get
    # {} back and must cope with the missing keys themselves.
    print("Failed to get a proper response from remote "
          "server. Status Code: {}. Response: {}"
          "".format(response.status_code, response.text))
    return {}
|||
|
|||
|
|||
def uuid_search(uuid):
    """Poll the search service once for the result of a submitted job.

    :param uuid: job id previously returned by the predict endpoint
    :return: parsed JSON response on HTTP 200 (expected to carry ``code``
             and ``text`` — see uuid_search_mp), otherwise ``{}``
    """
    data = {
        "id": uuid
    }
    response = requests.post(
        chatgpt_url_search,
        json=data,
        timeout=100000
    )
    if response.status_code == 200:
        return response.json()
    # NOTE(review): failures are only printed and swallowed; callers get
    # {} back and must cope with the missing keys themselves.
    print("Failed to get a proper response from remote "
          "server. Status Code: {}. Response: {}"
          "".format(response.status_code, response.text))
    return {}
|||
|
|||
|
|||
def uuid_search_mp(results):
    """Block until every submitted job has produced a result.

    Repeatedly polls the search endpoint for each job id found at
    ``item["texts"]["id"]`` until all slots are filled.

    :param results: list of predict responses, each carrying a job id
    :return: list of generated texts, positionally matching ``results``
    """
    texts = [""] * len(results)
    # An empty string marks "not done yet"; the original rebuilt this
    # check with a manual flag loop, and re-polled jobs that had already
    # finished on every round.
    while not all(texts):
        for i, item in enumerate(results):
            if texts[i]:
                continue  # already finished — don't poll it again
            result = uuid_search(item["texts"]["id"])
            # uuid_search returns {} on transport errors; .get avoids the
            # KeyError the original would raise on result["code"].
            if result.get("code") == 200:
                texts[i] = result["text"]
        # One pause per polling round (the original slept per item).
        time.sleep(3)
    return texts
|||
|
|||
|
|||
@app.route("/predict", methods=["POST"])
def handle_query():
    """Relay an OpenAI-style chat request to the local predict service.

    Expects a JSON body with ``model``, ``messages`` (OpenAI chat list),
    ``top_p`` and ``temperature``. Submits the job, blocks until the
    backend finishes, audits the output, and answers with an
    OpenAI-chat-completion-shaped JSON payload plus ``return_type_list``.
    """
    print(request.remote_addr)
    model = request.json.get("model")
    messages = request.json.get("messages")
    top_p = request.json.get("top_p")
    temperature = request.json.get("temperature")

    # Debug trace of the incoming request (the original printed these
    # four values twice; once is enough).
    print(model)
    print(messages)
    print(top_p)
    print(temperature)

    # Only the latest user turn is forwarded, wrapped in the backend's
    # chat template; earlier turns are dropped.
    content = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n".format(messages[-1]["content"])

    # Submit the job — the predict response carries the job id at
    # uid["texts"]["id"] — then poll until the generated text is ready.
    uid = request_api_chatgpt(content, model, top_p, temperature)
    results = uuid_search_mp([uid])[0]

    # Audit and classify against the FIRST message (the task prompt),
    # not the last user turn used for generation above.
    check_problems(messages[0]["content"], results)
    return_type_list = return_type(messages[0]["content"], results)

    return_text = {
        'code': 200,
        'id': uid["texts"]["id"],
        'object': 0,
        'created': 0,
        'model': model,
        'choices': [
            {
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': results
                },
                'logprobs': None,
                'finish_reason': 'stop'
            }
        ],
        'return_type_list': return_type_list,
        'usage': 0,
        'system_fingerprint': 0
    }
    return jsonify(return_text)
|||
|
|||
|
|||
|
|||
|
|||
# Entry point: serve on all interfaces, port 12004, handling each request
# in its own thread, with the debugger disabled.
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=12004, threaded=True, debug=False)
|||
|
Loading…
Reference in new issue