import base64
import json
import os
import re
import socket
import threading
import time
import urllib.parse as pa
import uuid
from threading import Thread

import openai
import redis
from flask import Flask, Response, jsonify, make_response, request, send_file, send_from_directory
app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False
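# Determine this host's outbound IP address by "connecting" a UDP socket to a public DNS server
# (no packets are actually sent for a UDP connect).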
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
localhostip = s.getsockname()[0]
lock = threading.RLock()
pool = redis.ConnectionPool(host='localhost', port=63179, max_connections=50, db=2, password='Zhicheng123*')
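# Note: decode_responses passed to redis.Redis() is not applied when an explicit connection_pool
# is supplied, so values read back below arrive as bytes and are decoded manually where needed.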
redis_ = redis.Redis(connection_pool=pool, decode_responses=True)
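# Patterns for recognizing chapter-level headings in the generated outline:
# pantten_second_biaoti matches the second chapter heading (e.g. "二、..." or "2. ..."),
# pantten_other_biaoti matches chapter headings numbered 2-9.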
pantten_second_biaoti = r'[2二ⅡⅠ][、.]\s{0,}?[\u4e00-\u9fa5]+'
pantten_other_biaoti = r'[2-9二三四五六七八九ⅡⅢⅣⅤⅥⅦⅧⅨ][、.]\s{0,}?[\u4e00-\u9fa5]+'
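# Prompt templates (kept in Chinese, since they are sent to the model verbatim) for each step:
# outline, chapter/section bodies, references, acknowledgements, proposal report, abstracts and keywords.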
mulu_prompt = "请帮我根据题目为“{}”生成一个论文目录"
first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容续写完整,保证续写内容不少于800字"
small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容续写完整,保证续写内容不少于800字"
references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成15篇左右的参考文献,要求其中有有中文参考文献不低于12篇,英文参考文献不低于2篇"
thank_prompt = "请以“{}”为题写一篇论文的致谢"
kaitibaogao_prompt = "请以《{}》为题目生成研究的主要的内容、背景、目的、意义,要求不少于100字"
chinese_abstract_prompt = "请以《{}》为题目生成论文摘要,要求不少于1500字"
english_abstract_prompt = "请把“{}”这段文字翻译成英文"
chinese_keyword_prompt = "请为“{}”这段论文摘要生成3-5个关键字"
english_keyword_prompt = "请把“{}”这几个关键字翻译成英文"
thanks = "致谢"
references = "参考文献"
dabiaoti = ["", "", "", "", "", "", "", ""]
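# Root directory for per-paper output files (the JSON result, and the Word documents served by /download).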
project_data_txt_path = "/home/majiahui/ChatGPT_Sever/new_data_txt"
"""
key_list = [
{"ip": key-api},
{"ip": key-api},
{"ip": key-api},
]
redis_title = []
redis_title_ing = []
redis_small_task = [
{
uuid,
api_key,
mulu_title_id,
title,
mulu,
subtitle,
prompt
}
]
redis_res = [
{
"uuid":
"完成进度":
"标题":
"中文摘要":"",
"英文摘要"
"中文关键字"
"英文关键字"
"正文" : [""] * len(content)
}
] -> list()
"""
openaikey_list = ["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",
"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",
"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",
"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",
"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",
"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",
"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",
"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",
"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"]
redis_key_name_openaikey_list = "openaikey_list_{}".format(str(localhostip))
redis_title = "redis_title"
redis_title_ing = "redis_title_ing"
redis_small_task = "redis_small_task"
redis_res = "redis_res"
for i in openaikey_list:
redis_.rpush(redis_key_name_openaikey_list, i)
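# Worker: generates the proposal report (开题报告) for a paper, returns the API key to the pool,
# and records the result (and progress counter) under the paper's UUID in redis_res.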
def chat_kaitibaogao(api_key, uuid, main_parameter):
openai.api_key = api_key
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": kaitibaogao_prompt.format(main_parameter[0])},
],
temperature=0.5
)
kaitibaogao = res.choices[0].message.content
redis_.rpush(redis_key_name_openaikey_list, api_key)
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
res_dict["开题报告"] = kaitibaogao
res_dict_str = json.dumps(res_dict, ensure_ascii=False)
redis_.hset(redis_res, uuid, res_dict_str)
lock.release()
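# Worker: generates the Chinese abstract, its English translation, and the Chinese/English
# keywords, then records them under the paper's UUID in redis_res.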
def chat_abstract_keyword(api_key, uuid, main_parameter):
openai.api_key = api_key
# Generate the Chinese abstract
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": chinese_abstract_prompt.format(main_parameter[0])},
],
temperature=0.5
)
chinese_abstract = res.choices[0].message.content
# Generate the English abstract
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": english_abstract_prompt.format(chinese_abstract)},
],
temperature=0.5
)
english_abstract = res.choices[0].message.content
# Generate the Chinese keywords
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": chinese_keyword_prompt.format(chinese_abstract)},
],
temperature=0.5
)
chinese_keyword = res.choices[0].message.content
# Generate the English keywords
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": english_keyword_prompt.format(chinese_keyword)},
],
temperature=0.5
)
english_keyword = res.choices[0].message.content
paper_abstract_keyword = {
"中文摘要": chinese_abstract,
"英文摘要": english_abstract,
"中文关键词": chinese_keyword,
"英文关键词": english_keyword
}
redis_.rpush(redis_key_name_openaikey_list, api_key)
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
res_dict["中文摘要"] = paper_abstract_keyword["中文摘要"]
res_dict["英文摘要"] = paper_abstract_keyword["英文摘要"]
res_dict["中文关键词"] = paper_abstract_keyword["中文关键词"]
res_dict["英文关键词"] = paper_abstract_keyword["英文关键词"]
res_dict_str = json.dumps(res_dict, ensure_ascii=False)
redis_.hset(redis_res, uuid, res_dict_str)
lock.release()
def chat_content(api_key, uuid, main_parameter):
'''
Generate the body text for one outline entry and store it in the result hash.
:param api_key: OpenAI API key popped from the Redis key pool
:param uuid: identifier of the paper being generated
:param main_parameter: [content_index, title, mulu, subtitle, prompt]
:return: None
'''
content_index = main_parameter[0]
title = main_parameter[1]
mulu = main_parameter[2]
subtitle = main_parameter[3]
prompt = main_parameter[4]
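# Outline entries prefixed with "@@" are chapter-level headings: they are copied into the output
# verbatim (without the marker) and no body text is requested from the model for them.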
if subtitle[:2] == "@@":
res_content = subtitle[2:]
else:
openai.api_key = api_key
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt.format(title, mulu, subtitle)},
],
temperature=0.5
)
res_content = res.choices[0].message.content
redis_.rpush(redis_key_name_openaikey_list, api_key)
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
table_of_contents = res_dict["table_of_contents"]
table_of_contents[content_index] = res_content
res_dict["table_of_contents"] = table_of_contents
res_dict_str = json.dumps(res_dict, ensure_ascii=False)
redis_.hset(redis_res, uuid, res_dict_str)
lock.release()
def chat_thanks(api_key, uuid, main_parameter):
'''
Generate the acknowledgements (致谢) section and store it in the result hash.
:param api_key: OpenAI API key popped from the Redis key pool
:param uuid: identifier of the paper being generated
:param main_parameter: [title, thank_prompt]
:return: None
'''
title = main_parameter[0]
prompt = main_parameter[1]
openai.api_key = api_key
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt.format(title)},
],
temperature=0.5
)
res_content = res.choices[0].message.content
redis_.rpush(redis_key_name_openaikey_list, api_key)
# "致谢": "",
# "参考文献": "",
# 加锁 读取redis生成致谢并存储
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
res_dict["致谢"] = res_content
res_dict_str = json.dumps(res_dict, ensure_ascii=False)
redis_.hset(redis_res, uuid, res_dict_str)
lock.release()
def chat_references(api_key, uuid, main_parameter):
'''
Generate the reference list (参考文献) and store it in the result hash.
:param api_key: OpenAI API key popped from the Redis key pool
:param uuid: identifier of the paper being generated
:param main_parameter: [title, mulu, references_prompt]
:return: None
'''
title = main_parameter[0]
mulu = main_parameter[1]
prompt = main_parameter[2]
openai.api_key = api_key
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt.format(title, mulu)},
],
temperature=0.5
)
res_content = res.choices[0].message.content
redis_.rpush(redis_key_name_openaikey_list, api_key)
# Lock, read the paper's result entry from Redis, store the references, and write it back
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
res_dict["参考文献"] = res_content
res_dict_str = json.dumps(res_dict, ensure_ascii=False)
redis_.hset(redis_res, uuid, res_dict_str)
lock.release()
def threading_mulu(key_api, title, uuid):
'''
Generate the outline for a title, split it into sub-tasks pushed onto the sub-task Redis list,
and create the skeleton result entry in the result hash.
:return: None
'''
openai.api_key = key_api
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": mulu_prompt.format(title)},
],
temperature=0.5
)
redis_.rpush(redis_key_name_openaikey_list, key_api)
mulu = res.choices[0].message.content
mulu_list = str(mulu).split("\n")
mulu_list = [i.strip() for i in mulu_list if i != ""]
print(mulu_list)
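# Walk the outline: keep the first line as-is, and prefix chapter-level headings (matched by the
# patterns above) with "@@" so that chat_content later skips text generation for them. Lines that
# appear before the second chapter heading (other than the first line) are dropped.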
cun_bool = False
table_of_contents = [mulu_list[0]]
for i in mulu_list[1:]:
result_second_biaoti_list = re.findall(pantten_second_biaoti, i)
result_other_biaoti_list = re.findall(pantten_other_biaoti, i)
if result_second_biaoti_list != []:
table_of_contents.append("@@" + i)
cun_bool = True
continue
if cun_bool == False:
continue
else:
if result_other_biaoti_list != []:
table_of_contents.append("@@" + i)
else:
table_of_contents.append(i)
print(table_of_contents)
# table_of_contents = table_of_contents[:3] + table_of_contents[-1:]
# print(table_of_contents)
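# If the model already appended 致谢 / 参考文献 at the end of the outline, drop them here; they
# are produced by dedicated thanks/references tasks instead.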
thanks_references_bool_table = table_of_contents[-3:]
if references in thanks_references_bool_table:
table_of_contents.remove(references)
if thanks in thanks_references_bool_table:
table_of_contents.remove(thanks)
print(len(table_of_contents))
small_task_list = []
# Each paper_content task's main_parameter is: [content_index, title, mulu, subtitle, prompt]
kaitibaogao_task = {
"task_type": "kaitibaogao",
"uuid": uuid,
"main_parameter": [title]
}
chat_abstract_task = {
"task_type": "chat_abstract",
"uuid": uuid,
"main_parameter": [title]
}
small_task_list.append(kaitibaogao_task)
small_task_list.append(chat_abstract_task)
content_index = 0
while True:
if content_index == len(table_of_contents):
break
subtitle = table_of_contents[content_index]
if content_index == 0:
prompt = first_title_prompt
elif subtitle == "参考文献":
prompt = references_prompt
elif subtitle == "致谢":
prompt = thank_prompt
else:
prompt = small_title_prompt
print("请求的所有参数",
content_index,
title,
subtitle,
prompt)
paper_content = {
"task_type": "paper_content",
"uuid": uuid,
"main_parameter": [
content_index,
title,
mulu,
subtitle,
prompt
]
}
small_task_list.append(paper_content)
content_index += 1
thanks_task = {
"task_type": "thanks_task",
"uuid": uuid,
"main_parameter": [
title,
thank_prompt
]
}
references_task = {
"task_type": "references_task",
"uuid": uuid,
"main_parameter": [
title,
mulu,
references_prompt
]
}
small_task_list.append(thanks_task)
small_task_list.append(references_task)
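# Queue every sub-task for threading_2 and create the skeleton result entry; each worker fills in
# its field and increments tasking_num so that threading_3 can detect completion.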
for small_task in small_task_list:
small_task = json.dumps(small_task, ensure_ascii=False)
redis_.rpush(redis_small_task, small_task)
res = {
"uuid": uuid,
"num_small_task": len(small_task_list),
"tasking_num": 0,
"标题": title,
"目录": mulu,
"开题报告": "",
"任务书": "",
"中文摘要": "",
"英文摘要": "",
"中文关键词": "",
"英文关键词": "",
"正文": "",
"致谢": "",
"参考文献": "",
"table_of_contents": [""] * len(table_of_contents)
}
res = json.dumps(res, ensure_ascii=False)
redis_.hset(redis_res, uuid, res)
def threading_1():
'''
Dispatcher: while the sub-task queue is empty, pop a submitted title and an API key
and spawn threading_mulu to generate its outline.
:return: None
'''
while True:
if redis_.llen(redis_small_task) != 0:  # if sub-tasks are still queued, wait before dispatching a new title
time.sleep(1)
continue
elif redis_.llen(redis_title) != 0 and redis_.llen(redis_key_name_openaikey_list) != 0:
title_uuid_dict_str = redis_.lpop(redis_title).decode('UTF-8')
api_key = redis_.lpop(redis_key_name_openaikey_list).decode('UTF-8')
# redis_title:{"id": id_, "title": title}
title_uuid_dict = json.loads(title_uuid_dict_str)
title = title_uuid_dict["title"]
uuid_id = title_uuid_dict["id"]
t = Thread(target=threading_mulu, args=(api_key,
title,
uuid_id,
))
t.start()
else:
time.sleep(1)
continue
def threading_2():
'''
Worker dispatcher: pop sub-tasks in order and start a thread for each one.
:return: None
'''
while True:
if redis_.llen(redis_small_task) != 0 and redis_.llen(redis_key_name_openaikey_list) != 0:
# Execute one queued sub-task
api_key = redis_.lpop(redis_key_name_openaikey_list).decode('UTF-8')
small_title = redis_.lpop(redis_small_task).decode('UTF-8')
small_title = json.loads(small_title)
task_type = small_title["task_type"]
uuid = small_title["uuid"]
main_parameter = small_title["main_parameter"]
# "task_type": "paper_content",
# "uuid": uuid,
# "main_parameter": [
# "task_type": "paper_content",
# "task_type": "chat_abstract",
# "task_type": "kaitibaogao",
if task_type == "kaitibaogao":
t = Thread(target=chat_kaitibaogao, args=(api_key,
uuid,
main_parameter
))
t.start()
elif task_type == "chat_abstract":
t = Thread(target=chat_abstract_keyword, args=(api_key,
uuid,
main_parameter
))
t.start()
elif task_type == "paper_content":
t = Thread(target=chat_content, args=(api_key,
uuid,
main_parameter
))
t.start()
elif task_type == "thanks_task":
t = Thread(target=chat_thanks, args=(api_key,
uuid,
main_parameter
))
t.start()
elif task_type == "references_task":
t = Thread(target=chat_references, args=(api_key,
uuid,
main_parameter
))
t.start()
else:
time.sleep(1)
continue
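# Finisher: polls the result hash; when a paper's tasking_num reaches num_small_task, writes its
# JSON to disk, builds the /download URLs for the Word files, caches the final response under the
# paper's UUID for /search, and removes it from the in-progress set.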
def threading_3():
while True:
res_end_list = []
res_dict = redis_.hgetall(redis_res)
for key, values in res_dict.items():
values_dict = json.loads(values)
# "num_small_task": len(small_task_list) - 1,
# "tasking_num": 0,
if int(values_dict["num_small_task"]) == int(values_dict["tasking_num"]):
res_end_list.append(key)
for key in res_end_list:
redis_.hdel(redis_res, key)
res_str = res_dict[key].decode("utf-8")
json_str = json.dumps(json.loads(res_str), indent=4, ensure_ascii=False)  # pretty-print the stored result as a JSON object
key = str(key, encoding="utf-8")
uuid_path = os.path.join(project_data_txt_path, key)
os.makedirs(uuid_path)
paper_content_path = os.path.join(uuid_path, "paper_content.json")
with open(paper_content_path, 'w') as json_file:
json_file.write(json_str)
"""
调用jar包
占位
"""
url_path_paper = "http://104.244.90.248:14000/download?filename_path={}/paper.docx".format(key)
url_path_kaiti = "http://104.244.90.248:14000/download?filename_path={}/paper_start.docx".format(key)
return_text = str({"id": key,
"content_url_path": url_path_paper,
"content_report_url_path": url_path_kaiti,
"probabilities": None,
"status_code": 200})
redis_.srem(redis_title_ing, key)
redis_.set(key, return_text, 28800)
time.sleep(1)
@app.route("/chat", methods=["POST"])
def chat():
print(request.remote_addr)
title = request.json["title"]
id_ = str(uuid.uuid1())
print(id_)
redis_.rpush(redis_title, json.dumps({"id": id_, "title": title}))  # queue the title in Redis
return_text = {"texts": {'id': id_,}, "probabilities": None, "status_code": 200}
print("ok")
redis_.sadd(redis_title_ing, id_)
return jsonify(return_text)  # return the task id
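# Illustrative usage (assuming the service runs on port 14002):
#   POST /chat   with {"title": "..."}  ->  {"texts": {"id": "<uuid>"}, ...}
#   POST /search with {"id": "<uuid>"}  ->  code 201/202 while queued or running,
#                                           code 200 with content_url_path / content_report_url_path when done
#   GET  /download?filename_path=<uuid>/paper.docx  ->  the generated Word document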
@app.route("/download", methods=['GET'])
def download_file():
# filename_path has the form "<uuid>/<docname>.docx", i.e. a directory plus a filename (with extension), relative to project_data_txt_path
filename_path = request.args.get('filename_path', '')
filename = filename_path.split("/")[1]
path_name = os.path.join(project_data_txt_path, filename_path)
with open(path_name, 'rb') as f:
stream = f.read()
response = Response(stream, content_type='application/octet-stream')
response.headers['Content-disposition'] = 'attachment; filename={}'.format(filename)
return response
@app.route("/search", methods=["POST"])
def search():
id_ = request.json['id']  # the task id returned by /chat
result = redis_.get(id_)  # cached final result for this id, if any
if result is not None:
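# The finished result was stored in threading_3 via str(dict), so eval() rather than json.loads
# is used to parse it back.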
# redis_.delete(id_)
# result_dict = result.decode('UTF-8')
result_dict = eval(result)
# return_text = {"id":query_id, "load_result_path": load_result_path, "probabilities": None, "status_code": 200}
query_id = result_dict["id"]
# "content_url_path": url_path_paper,
# "content_report_url_path": url_path_kaiti,
content_url_path = result_dict["content_url_path"]
content_report_url_path = result_dict["content_report_url_path"]
probabilities = result_dict["probabilities"]
result_text = {'code': 200,
'content_url_path': content_url_path,
'content_report_url_path': content_report_url_path,
'probabilities': probabilities}
else:
querying_list = list(redis_.smembers(redis_title_ing))
querying_set = set()
for i in querying_list:
querying_set.add(i.decode())
querying_bool = False
if id_ in querying_set:
querying_bool = True
query_list_json = redis_.lrange(redis_title, 0, -1)
query_set_ids = set()
for i in query_list_json:
data_dict = json.loads(i)
query_id = data_dict['id']
query_set_ids.add(query_id)
query_bool = False
if id_ in query_set_ids:
query_bool = True
if querying_bool == True and query_bool == True:
result_text = {'code': "201", 'text': "", 'probabilities': None}
elif querying_bool == True and query_bool == False:
result_text = {'code': "202", 'text': "", 'probabilities': None}
else:
result_text = {'code': "203", 'text': "", 'probabilities': None}
return jsonify(result_text)  # return the result
# threading_1: take submitted titles, generate the outline and split it into sub-tasks (sub-task queue)
# threading_2: execute sub-tasks and store their results (result hash)
# threading_3: watch the result hash; when a paper is complete, write its JSON file and the Word download links
t = Thread(target=threading_1)
t.start()
t = Thread(target=threading_2)
t.start()
t = Thread(target=threading_3)
t.start()
if __name__ == '__main__':
# main("大型商业建筑人员疏散设计研究")
app.run(host="0.0.0.0", port=14002, threaded=True, debug=False)