|
|
@@ -14,6 +14,11 @@ import base64
|
|
|
import re |
|
|
|
import urllib.parse as pa |
|
|
|
import socket |
|
|
|
from serve_config_1 import Config |
|
|
|
import requests |
|
|
|
|
|
|
|
|
|
|
|
config = Config() |
|
|
|
|
|
|
|
app = Flask(__name__) |
|
|
|
app.config["JSON_AS_ASCII"] = False |
|
|
@@ -23,71 +28,18 @@ s.connect(("8.8.8.8", 80))
|
|
|
localhostip = s.getsockname()[0] |
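# The connect()/getsockname() pair above is a standard trick for finding the host's outbound LAN IP.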
|
|
|
|
|
|
|
lock = threading.RLock() |
|
|
|
pool = redis.ConnectionPool(host='localhost', port=63179, max_connections=50, db=2, password='Zhicheng123*') |
|
|
|
pool = redis.ConnectionPool(host=config.reids_ip, port=config.reids_port, max_connections=50, db=config.reids_db, password=config.reids_password) |
|
|
|
redis_ = redis.Redis(connection_pool=pool, decode_responses=True) |
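# All of the worker threads below share this single client and its underlying connection pool.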
|
|
|
|
|
|
|
pantten_second_biaoti = '[2二ⅡⅠ][、.]\s{0,}?[\u4e00-\u9fa5]+' |
|
|
|
pantten_other_biaoti = '[2-9二三四五六七八九ⅡⅢⅣⅤⅥⅦⅧⅨ][、.]\s{0,}?[\u4e00-\u9fa5]+' |
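# The two patterns above pick out numbered chapter headings in the generated outline
# (an Arabic, Chinese or Roman numeral followed by "、" or "."); threading_mulu uses them
# to mark top-level headings with a leading "@@".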
|
|
|
|
|
|
|
mulu_prompt = "请帮我根据题目为“{}”生成一个论文目录" |
|
|
|
first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容续写完整,保证续写内容不少于800字" |
|
|
|
small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容续写完整,保证续写内容不少于800字" |
|
|
|
references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成15篇左右的参考文献,要求其中有有中文参考文献不低于12篇,英文参考文献不低于2篇" |
|
|
|
thank_prompt = "请以“{}”为题写一篇论文的致谢" |
|
|
|
kaitibaogao_prompt = "请以《{}》为题目生成研究的主要的内容、背景、目的、意义,要求不少于100字" |
|
|
|
chinese_abstract_prompt = "请以《{}》为题目生成论文摘要,要求不少于1500字" |
|
|
|
english_abstract_prompt = "请把“{}”这段文字翻译成英文" |
|
|
|
chinese_keyword_prompt = "请为“{}”这段论文摘要生成3-5个关键字" |
|
|
|
english_keyword_prompt = "请把“{}”这几个关键字翻译成英文" |
|
|
|
thanks = "致谢" |
|
|
|
references = "参考文献" |
|
|
|
dabiaoti = ["二", "三", "四", "五", "六", "七", "八", "九"] |
|
|
|
project_data_txt_path = "/home/majiahui/ChatGPT_Sever/new_data_txt" |
|
|
|
|
|
|
|
""" |
|
|
|
key_list = [ |
|
|
|
{"ip": key-api}, |
|
|
|
{"ip": key-api}, |
|
|
|
{"ip": key-api}, |
|
|
|
] |
|
|
|
redis_title = [] |
|
|
|
redis_title_ing = [] |
|
|
|
redis_small_task = [ |
|
|
|
{ |
|
|
|
uuid, |
|
|
|
api_key, |
|
|
|
mulu_title_id, |
|
|
|
title, |
|
|
|
mulu, |
|
|
|
subtitle, |
|
|
|
prompt |
|
|
|
} |
|
|
|
] |
|
|
|
redis_res = [ |
|
|
|
{ |
|
|
|
"uuid": |
|
|
|
"完成进度": |
|
|
|
"标题": |
|
|
|
"中文摘要":"", |
|
|
|
"英文摘要" |
|
|
|
"中文关键字" |
|
|
|
"英文关键字" |
|
|
|
"正文" : [""] * len(content) |
|
|
|
} |
|
|
|
] - |
|
|
|
|
|
|
|
> list() |
|
|
|
""" |
|
|
|
|
|
|
|
openaikey_list = ["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", |
|
|
|
"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", |
|
|
|
"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", |
|
|
|
"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", |
|
|
|
"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", |
|
|
|
"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", |
|
|
|
"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", |
|
|
|
"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", |
|
|
|
"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"] |
|
|
|
|
|
|
|
flask_serves_env = "http://{}:{}".format(localhostip,config.flask_port) |
|
|
|
|
|
|
|
paper_download_url = flask_serves_env + "/download?filename_path={}/paper.docx" |
|
|
|
paper_start_download_url = flask_serves_env + "/download?filename_path={}/paper_start.docx" |
|
|
|
|
|
|
|
redis_key_name_openaikey_bad_dict = "openaikey_bad_list_{}".format(str(localhostip)) |
|
|
|
|
|
|
|
redis_key_name_openaikey_list = "openaikey_list_{}".format(str(localhostip)) |
|
|
|
|
|
|
@@ -99,82 +51,102 @@ redis_small_task = "redis_small_task"
|
|
|
|
|
|
|
redis_res = "redis_res" |
|
|
|
|
|
|
|
for i in openaikey_list: |
|
|
|
for i in config.openaikey_list: |
|
|
|
redis_.rpush(redis_key_name_openaikey_list, i) |
|
|
|
|
|
|
|
redis_.hset(redis_key_name_openaikey_bad_dict, "1", "1") |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
redis_.persist(redis_key_name_openaikey_bad_dict) |
|
|
|
|
|
|
|
def chat_kaitibaogao(api_key, uuid, main_parameter): |
|
|
|
# t = Thread(target=chat_kaitibaogao, args=(api_key, |
|
|
|
# uuid, |
|
|
|
# main_parameter |
|
|
|
# time.sleep(1) |
|
|
|
openai.api_key = api_key |
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": kaitibaogao_prompt.format(main_parameter[0])}, |
|
|
|
|
|
|
|
def request_api_chatgpt(api_key, prompt): |
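"""
Thin wrapper around the OpenAI /v1/chat/completions HTTP endpoint: builds the
Bearer-auth headers and a gpt-3.5-turbo request body (temperature 0.5) for the
given prompt and returns the raw requests.Response; callers call .json() and
handle failures themselves.
"""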
|
|
|
OPENAI_API_KEY = api_key |
|
|
|
url = "https://api.openai.com/v1/chat/completions" |
|
|
|
headers = { |
|
|
|
"Content-Type": "application/json", |
|
|
|
"Authorization": f"Bearer {OPENAI_API_KEY}" |
|
|
|
} |
|
|
|
data = { |
|
|
|
"model": "gpt-3.5-turbo", |
|
|
|
"messages": [ |
|
|
|
{"role": "user", "content": prompt}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
kaitibaogao = res.choices[0].message.content |
|
|
|
# kaitibaogao_path = os.path.join(, "kaitibaogao.txt") |
|
|
|
# with open(kaitibaogao_path, 'w', encoding='utf8') as f_kaitibaogao: |
|
|
|
# f_kaitibaogao.write(kaitibaogao) |
|
|
|
|
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
"temperature": 0.5 |
|
|
|
} |
|
|
|
response = requests.post(url, |
|
|
|
headers=headers, |
|
|
|
data=json.dumps(data), |
|
|
|
timeout=600) |
|
|
|
|
|
|
|
return response |
|
|
|
|
|
|
|
def chat_kaitibaogao(api_key, uuid, main_parameter,task_type): |
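"""
Generate the 开题报告 (opening report) for the title in main_parameter[0].
On success the api_key goes back onto the key pool list; on failure the uuid is
recorded in the bad-key hash and an empty string is used instead. Either way the
per-uuid result dict in redis_res is updated under the global lock.
"""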
|
|
|
|
|
|
|
try: |
|
|
|
response =request_api_chatgpt(api_key, config.kaitibaogao_prompt.format(main_parameter[0])) |
|
|
|
res = response.json() |
|
|
|
kaitibaogao = res["choices"][0]["message"]["content"] |
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
|
|
|
|
except: |
|
|
|
""" |
|
|
|
Send alert
|
|
|
""" |
|
|
|
kaitibaogao = "" |
|
|
|
redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type))) |
|
|
|
|
|
|
|
|
|
|
|
lock.acquire() |
|
|
|
res_dict_str = redis_.hget(redis_res, uuid) |
|
|
|
res_dict = json.loads(res_dict_str) |
|
|
|
res_dict["tasking_num"] += 1 |
|
|
|
print("子任务进度".format(uuid),res_dict["tasking_num"]) |
|
|
|
res_dict["开题报告"] = kaitibaogao |
|
|
|
res_dict_str = json.dumps(res_dict, ensure_ascii=False) |
|
|
|
redis_.hset(redis_res, uuid, res_dict_str) |
|
|
|
lock.release() |
|
|
|
|
|
|
|
|
|
|
|
def chat_abstract_keyword(api_key, uuid, main_parameter): |
|
|
|
# api_key, |
|
|
|
# uuid, |
|
|
|
# main_parameter |
|
|
|
# time.sleep(7) |
|
|
|
openai.api_key = api_key |
|
|
|
# Generate the Chinese abstract
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": chinese_abstract_prompt.format(main_parameter[0])}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
chinese_abstract = res.choices[0].message.content |
|
|
|
# Generate the English abstract
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": english_abstract_prompt.format(chinese_abstract)}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
english_abstract = res.choices[0].message.content |
|
|
|
# Generate the Chinese keywords
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": chinese_keyword_prompt.format(chinese_abstract)}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
chinese_keyword = res.choices[0].message.content |
|
|
|
# Generate the English keywords
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": english_keyword_prompt.format(chinese_keyword)}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
def chat_abstract_keyword(api_key, uuid, main_parameter, task_type): |
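"""
Run four chained chat requests: Chinese abstract for the title, its English
translation, Chinese keywords from the abstract, and their English translation.
The results (empty strings if any request fails) are written into the redis_res
entry for this uuid under the global lock.
"""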
|
|
|
|
|
|
|
try: |
|
|
|
# Generate the Chinese abstract
|
|
|
|
|
|
|
response =request_api_chatgpt(api_key, config.chinese_abstract_prompt.format(main_parameter[0])) |
|
|
|
res = response.json() |
|
|
|
chinese_abstract = res["choices"][0]["message"]["content"] |
|
|
|
|
|
|
|
# Generate the English abstract
|
|
|
|
|
|
|
response = request_api_chatgpt(api_key, config.english_abstract_prompt.format(chinese_abstract)) |
|
|
|
res = response.json() |
|
|
|
english_abstract = res["choices"][0]["message"]["content"] |
|
|
|
|
|
|
|
english_keyword = res.choices[0].message.content |
|
|
|
|
|
|
|
# Generate the Chinese keywords
|
|
|
|
|
|
|
response = request_api_chatgpt(api_key, config.chinese_keyword_prompt.format(chinese_abstract)) |
|
|
|
res = response.json() |
|
|
|
chinese_keyword = res["choices"][0]["message"]["content"] |
|
|
|
|
|
|
|
|
|
|
|
# Generate the English keywords
|
|
|
response = request_api_chatgpt(api_key, config.english_keyword_prompt.format(chinese_keyword)) |
|
|
|
res = response.json() |
|
|
|
english_keyword = res["choices"][0]["message"]["content"] |
|
|
|
|
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
except: |
|
|
|
""" |
|
|
|
Send alert
|
|
|
""" |
|
|
|
chinese_abstract = "" |
|
|
|
english_abstract = "" |
|
|
|
chinese_keyword = "" |
|
|
|
english_keyword = "" |
|
|
|
redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type))) |
|
|
|
|
|
|
|
paper_abstract_keyword = { |
|
|
|
"中文摘要": chinese_abstract, |
|
|
@@ -191,11 +163,12 @@ def chat_abstract_keyword(api_key, uuid, main_parameter):
|
|
|
# lock.acquire() |
|
|
|
# api_key_list.append(api_key) |
|
|
|
# lock.release() |
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
|
|
|
|
lock.acquire() |
|
|
|
res_dict_str = redis_.hget(redis_res, uuid) |
|
|
|
res_dict = json.loads(res_dict_str) |
|
|
|
res_dict["tasking_num"] += 1 |
|
|
|
print("子任务进度".format(uuid),res_dict["tasking_num"]) |
|
|
|
res_dict["中文摘要"] = paper_abstract_keyword["中文摘要"] |
|
|
|
res_dict["英文摘要"] = paper_abstract_keyword["英文摘要"] |
|
|
|
res_dict["中文关键词"] = paper_abstract_keyword["中文关键词"] |
|
|
@@ -205,7 +178,7 @@ def chat_abstract_keyword(api_key, uuid, main_parameter):
|
|
|
lock.release() |
|
|
|
|
|
|
|
|
|
|
|
def chat_content(api_key, uuid, main_parameter): |
|
|
|
def chat_content(api_key, uuid, main_parameter, task_type): |
|
|
|
''' |
|
|
|
|
|
|
|
:param api_key: |
|
|
@@ -222,20 +195,26 @@ def chat_content(api_key, uuid, main_parameter):
|
|
|
if subtitle[:2] == "@@": |
|
|
|
res_content = subtitle[2:] |
|
|
|
else: |
|
|
|
openai.api_key = api_key |
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": prompt.format(title, mulu, subtitle)}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
res_content = res.choices[0].message.content |
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
try: |
|
|
|
|
|
|
|
response = request_api_chatgpt(api_key, prompt.format(title, mulu, subtitle)) |
|
|
|
res = response.json() |
|
|
|
res_content = res["choices"][0]["message"]["content"] |
|
|
|
|
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
except: |
|
|
|
""" |
|
|
|
Send alert
|
|
|
""" |
|
|
|
res_content = "" |
|
|
|
redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type))) |
|
|
|
|
|
|
|
lock.acquire() |
|
|
|
res_dict_str = redis_.hget(redis_res, uuid) |
|
|
|
res_dict = json.loads(res_dict_str) |
|
|
|
res_dict["tasking_num"] += 1 |
|
|
|
print("子任务进度".format(uuid), res_dict["tasking_num"]) |
|
|
|
table_of_contents = res_dict["table_of_contents"] |
|
|
|
table_of_contents[content_index] = res_content |
|
|
|
res_dict["table_of_contents"] = table_of_contents |
|
|
@@ -244,7 +223,7 @@ def chat_content(api_key, uuid, main_parameter):
|
|
|
lock.release() |
|
|
|
|
|
|
|
|
|
|
|
def chat_thanks(api_key, uuid, main_parameter): |
|
|
|
def chat_thanks(api_key, uuid, main_parameter, task_type): |
|
|
|
''' |
|
|
|
|
|
|
|
:param api_key: |
|
|
@@ -257,31 +236,32 @@ def chat_thanks(api_key, uuid, main_parameter):
|
|
|
title = main_parameter[0] |
|
|
|
prompt = main_parameter[1] |
|
|
|
|
|
|
|
openai.api_key = api_key |
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": prompt.format(title)}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
res_content = res.choices[0].message.content |
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
|
|
|
|
# "致谢": "", |
|
|
|
# "参考文献": "", |
|
|
|
# Lock, read redis, generate the acknowledgements and store them
|
|
|
try: |
|
|
|
response = request_api_chatgpt(api_key, prompt.format(title)) |
|
|
|
res = response.json() |
|
|
|
res_content = res["choices"][0]["message"]["content"] |
|
|
|
|
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
except: |
|
|
|
""" |
|
|
|
Send alert
|
|
|
""" |
|
|
|
res_content = "" |
|
|
|
redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type))) |
|
|
|
|
|
|
|
lock.acquire() |
|
|
|
res_dict_str = redis_.hget(redis_res, uuid) |
|
|
|
res_dict = json.loads(res_dict_str) |
|
|
|
res_dict["tasking_num"] += 1 |
|
|
|
print("子任务进度".format(uuid), res_dict["tasking_num"]) |
|
|
|
res_dict["致谢"] = res_content |
|
|
|
res_dict_str = json.dumps(res_dict, ensure_ascii=False) |
|
|
|
redis_.hset(redis_res, uuid, res_dict_str) |
|
|
|
lock.release() |
|
|
|
|
|
|
|
|
|
|
|
def chat_references(api_key, uuid, main_parameter): |
|
|
|
def chat_references(api_key, uuid, main_parameter, task_type): |
|
|
|
''' |
|
|
|
|
|
|
|
:param api_key: |
|
|
@@ -295,195 +275,205 @@ def chat_references(api_key, uuid, main_parameter):
|
|
|
title = main_parameter[0] |
|
|
|
mulu = main_parameter[1] |
|
|
|
prompt = main_parameter[2] |
|
|
|
try: |
|
|
|
|
|
|
|
response = request_api_chatgpt(api_key, prompt.format(title, mulu)) |
|
|
|
res = response.json() |
|
|
|
res_content = res["choices"][0]["message"]["content"] |
|
|
|
|
|
|
|
openai.api_key = api_key |
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": prompt.format(title, mulu)}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
res_content = res.choices[0].message.content |
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
except: |
|
|
|
""" |
|
|
|
Send alert
|
|
|
""" |
|
|
|
res_content = "" |
|
|
|
redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type))) |
|
|
|
|
|
|
|
# Lock, read redis and store the result
|
|
|
lock.acquire() |
|
|
|
res_dict_str = redis_.hget(redis_res, uuid) |
|
|
|
res_dict = json.loads(res_dict_str) |
|
|
|
res_dict["tasking_num"] += 1 |
|
|
|
print("子任务进度".format(uuid), res_dict["tasking_num"]) |
|
|
|
res_dict["参考文献"] = res_content |
|
|
|
res_dict_str = json.dumps(res_dict, ensure_ascii=False) |
|
|
|
redis_.hset(redis_res, uuid, res_dict_str) |
|
|
|
lock.release() |
|
|
|
|
|
|
|
|
|
|
|
def threading_mulu(key_api, title, uuid): |
|
|
|
def threading_mulu(api_key, title, uuid): |
|
|
|
''' |
|
|
|
Generate the table of contents and split the work into subtasks, pushing them onto the subtask redis list and seeding the redis store for the results
|
|
|
:return: |
|
|
|
''' |
|
|
|
|
|
|
|
openai.api_key = key_api |
|
|
|
res = openai.ChatCompletion.create( |
|
|
|
model="gpt-3.5-turbo", |
|
|
|
messages=[ |
|
|
|
{"role": "user", "content": mulu_prompt.format(title)}, |
|
|
|
], |
|
|
|
temperature=0.5 |
|
|
|
) |
|
|
|
|
|
|
|
redis_.rpush(redis_key_name_openaikey_list, key_api) |
|
|
|
mulu = res.choices[0].message.content |
|
|
|
mulu_list = str(mulu).split("\n") |
|
|
|
mulu_list = [i.strip() for i in mulu_list if i != ""] |
|
|
|
|
|
|
|
print(mulu_list) |
|
|
|
|
|
|
|
cun_bool = False |
|
|
|
table_of_contents = [mulu_list[0]] |
|
|
|
|
|
|
|
for i in mulu_list[1:]: |
|
|
|
result_second_biaoti_list = re.findall(pantten_second_biaoti, i) |
|
|
|
result_other_biaoti_list = re.findall(pantten_other_biaoti, i) |
|
|
|
if result_second_biaoti_list != []: |
|
|
|
table_of_contents.append("@@" + i) |
|
|
|
cun_bool = True |
|
|
|
continue |
|
|
|
if cun_bool == False: |
|
|
|
continue |
|
|
|
else: |
|
|
|
if result_other_biaoti_list != []: |
|
|
|
try: |
|
|
|
response = request_api_chatgpt(api_key, config.mulu_prompt.format(title)) |
|
|
|
res = response.json() |
|
|
|
mulu = res["choices"][0]["message"]["content"] |
|
|
|
|
|
|
|
redis_.rpush(redis_key_name_openaikey_list, api_key) |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
except: |
|
|
|
""" |
|
|
|
Send alert
|
|
|
""" |
|
|
|
res_content = "" |
|
|
|
redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str(api_key,"mulu")) |
|
|
|
mulu = "" |
|
|
|
|
|
|
|
try: |
|
|
|
mulu_list = str(mulu).split("\n") |
|
|
|
mulu_list = [i.strip() for i in mulu_list if i != ""] |
|
|
|
|
|
|
|
print(mulu_list) |
|
|
|
|
|
|
|
cun_bool = False |
|
|
|
table_of_contents = [mulu_list[0]] |
|
|
|
|
|
|
|
for i in mulu_list[1:]: |
|
|
|
result_second_biaoti_list = re.findall(config.pantten_second_biaoti, i) |
|
|
|
result_other_biaoti_list = re.findall(config.pantten_other_biaoti, i) |
|
|
|
if result_second_biaoti_list != []: |
|
|
|
table_of_contents.append("@@" + i) |
|
|
|
cun_bool = True |
|
|
|
continue |
|
|
|
if cun_bool == False: |
|
|
|
continue |
|
|
|
else: |
|
|
|
table_of_contents.append(i) |
|
|
|
|
|
|
|
print(table_of_contents) |
|
|
|
# table_of_contents = table_of_contents[:3] + table_of_contents[-1:] |
|
|
|
# print(table_of_contents) |
|
|
|
|
|
|
|
thanks_references_bool_table = table_of_contents[-3:] |
|
|
|
|
|
|
|
# thanks = "致谢" |
|
|
|
# references = "参考文献" |
|
|
|
if references in thanks_references_bool_table: |
|
|
|
table_of_contents.remove(references) |
|
|
|
|
|
|
|
if thanks in thanks_references_bool_table: |
|
|
|
table_of_contents.remove(thanks) |
|
|
|
|
|
|
|
# table_of_contents.append(thanks) |
|
|
|
# table_of_contents.append(references) |
|
|
|
|
|
|
|
# if thanks not in thanks_bool_table: |
|
|
|
# table_of_contents.insert(-1, "致谢") |
|
|
|
# |
|
|
|
# if thanks not in thanks_bool_table: |
|
|
|
# table_of_contents.insert(-1, "致谢") |
|
|
|
|
|
|
|
print(len(table_of_contents)) |
|
|
|
|
|
|
|
small_task_list = [] |
|
|
|
# api_key, |
|
|
|
# index, |
|
|
|
# title, |
|
|
|
# mulu, |
|
|
|
# subtitle, |
|
|
|
# prompt |
|
|
|
kaitibaogao_task = { |
|
|
|
"task_type": "kaitibaogao", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [title] |
|
|
|
} |
|
|
|
if result_other_biaoti_list != []: |
|
|
|
table_of_contents.append("@@" + i) |
|
|
|
else: |
|
|
|
table_of_contents.append(i) |
|
|
|
|
|
|
|
print(table_of_contents) |
|
|
|
# table_of_contents = table_of_contents[:3] + table_of_contents[-1:] |
|
|
|
# print(table_of_contents) |
|
|
|
|
|
|
|
thanks_references_bool_table = table_of_contents[-3:] |
|
|
|
|
|
|
|
# thanks = "致谢" |
|
|
|
# references = "参考文献" |
|
|
|
if references in thanks_references_bool_table: |
|
|
|
table_of_contents.remove(references) |
|
|
|
|
|
|
|
if thanks in thanks_references_bool_table: |
|
|
|
table_of_contents.remove(thanks) |
|
|
|
|
|
|
|
# table_of_contents.append(thanks) |
|
|
|
# table_of_contents.append(references) |
|
|
|
|
|
|
|
# if thanks not in thanks_bool_table: |
|
|
|
# table_of_contents.insert(-1, "致谢") |
|
|
|
# |
|
|
|
# if thanks not in thanks_bool_table: |
|
|
|
# table_of_contents.insert(-1, "致谢") |
|
|
|
|
|
|
|
print(len(table_of_contents)) |
|
|
|
|
|
|
|
small_task_list = [] |
|
|
|
# api_key, |
|
|
|
# index, |
|
|
|
# title, |
|
|
|
# mulu, |
|
|
|
# subtitle, |
|
|
|
# prompt |
|
|
|
kaitibaogao_task = { |
|
|
|
"task_type": "kaitibaogao", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [title] |
|
|
|
} |
|
|
|
|
|
|
|
chat_abstract_task = { |
|
|
|
"task_type": "chat_abstract", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [title] |
|
|
|
} |
|
|
|
small_task_list.append(kaitibaogao_task) |
|
|
|
small_task_list.append(chat_abstract_task) |
|
|
|
content_index = 0 |
|
|
|
while True: |
|
|
|
if content_index == len(table_of_contents): |
|
|
|
break |
|
|
|
subtitle = table_of_contents[content_index] |
|
|
|
if content_index == 0: |
|
|
|
prompt = first_title_prompt |
|
|
|
elif subtitle == "参考文献": |
|
|
|
prompt = references_prompt |
|
|
|
elif subtitle == "致谢": |
|
|
|
prompt = thank_prompt |
|
|
|
else: |
|
|
|
prompt = small_title_prompt |
|
|
|
print("请求的所有参数", |
|
|
|
content_index, |
|
|
|
title, |
|
|
|
subtitle, |
|
|
|
prompt) |
|
|
|
|
|
|
|
paper_content = { |
|
|
|
"task_type": "paper_content", |
|
|
|
chat_abstract_task = { |
|
|
|
"task_type": "chat_abstract", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [title] |
|
|
|
} |
|
|
|
small_task_list.append(kaitibaogao_task) |
|
|
|
small_task_list.append(chat_abstract_task) |
|
|
|
content_index = 0 |
|
|
|
while True: |
|
|
|
if content_index == len(table_of_contents): |
|
|
|
break |
|
|
|
subtitle = table_of_contents[content_index] |
|
|
|
if content_index == 0: |
|
|
|
prompt = config.first_title_prompt |
|
|
|
elif subtitle == "参考文献": |
|
|
|
prompt = config.references_prompt |
|
|
|
elif subtitle == "致谢": |
|
|
|
prompt = config.thank_prompt |
|
|
|
else: |
|
|
|
prompt = config.small_title_prompt |
|
|
|
print("请求的所有参数", |
|
|
|
content_index, |
|
|
|
title, |
|
|
|
subtitle, |
|
|
|
prompt) |
|
|
|
|
|
|
|
paper_content = { |
|
|
|
"task_type": "paper_content", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [ |
|
|
|
content_index, |
|
|
|
title, |
|
|
|
mulu, |
|
|
|
subtitle, |
|
|
|
prompt |
|
|
|
] |
|
|
|
} |
|
|
|
|
|
|
|
small_task_list.append(paper_content) |
|
|
|
content_index += 1 |
|
|
|
|
|
|
|
thanks_task = { |
|
|
|
"task_type": "thanks_task", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [ |
|
|
|
content_index, |
|
|
|
title, |
|
|
|
mulu, |
|
|
|
subtitle, |
|
|
|
prompt |
|
|
|
config.thank_prompt |
|
|
|
] |
|
|
|
} |
|
|
|
|
|
|
|
small_task_list.append(paper_content) |
|
|
|
content_index += 1 |
|
|
|
|
|
|
|
thanks_task = { |
|
|
|
"task_type": "thanks_task", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [ |
|
|
|
title, |
|
|
|
thank_prompt |
|
|
|
] |
|
|
|
} |
|
|
|
|
|
|
|
references_task = { |
|
|
|
"task_type": "references_task", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [ |
|
|
|
references_task = { |
|
|
|
"task_type": "references_task", |
|
|
|
"uuid": uuid, |
|
|
|
"main_parameter": [ |
|
|
|
title, |
|
|
|
mulu, |
|
|
|
references_prompt |
|
|
|
config.references_prompt |
|
|
|
] |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
small_task_list.append(thanks_task) |
|
|
|
small_task_list.append(references_task) |
|
|
|
|
|
|
|
for small_task in small_task_list: |
|
|
|
small_task = json.dumps(small_task, ensure_ascii=False) |
|
|
|
redis_.rpush(redis_small_task, small_task) |
|
|
|
|
|
|
|
res = { |
|
|
|
"uuid": uuid, |
|
|
|
"num_small_task": len(small_task_list), |
|
|
|
"tasking_num": 0, |
|
|
|
"标题": title, |
|
|
|
"目录": mulu, |
|
|
|
"开题报告": "", |
|
|
|
"任务书": "", |
|
|
|
"中文摘要": "", |
|
|
|
"英文摘要": "", |
|
|
|
"中文关键词": "", |
|
|
|
"英文关键词": "", |
|
|
|
"正文": "", |
|
|
|
"致谢": "", |
|
|
|
"参考文献": "", |
|
|
|
"table_of_contents": [""] * len(table_of_contents) |
|
|
|
} |
|
|
|
small_task_list.append(thanks_task) |
|
|
|
small_task_list.append(references_task) |
|
|
|
|
|
|
|
res = json.dumps(res, ensure_ascii=False) |
|
|
|
redis_.hset(redis_res, uuid, res) |
|
|
|
for small_task in small_task_list: |
|
|
|
small_task = json.dumps(small_task, ensure_ascii=False) |
|
|
|
redis_.rpush(redis_small_task, small_task) |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
|
|
|
|
res = { |
|
|
|
"uuid": uuid, |
|
|
|
"num_small_task": len(small_task_list), |
|
|
|
"tasking_num": 0, |
|
|
|
"标题": title, |
|
|
|
"目录": mulu, |
|
|
|
"开题报告": "", |
|
|
|
"任务书": "", |
|
|
|
"中文摘要": "", |
|
|
|
"英文摘要": "", |
|
|
|
"中文关键词": "", |
|
|
|
"英文关键词": "", |
|
|
|
"正文": "", |
|
|
|
"致谢": "", |
|
|
|
"参考文献": "", |
|
|
|
"table_of_contents": [""] * len(table_of_contents) |
|
|
|
} |
|
|
|
|
|
|
|
res = json.dumps(res, ensure_ascii=False) |
|
|
|
redis_.hset(redis_res, uuid, res) |
|
|
|
except: |
|
|
|
print("目录程序错误") |
|
|
|
|
|
|
|
|
|
|
|
def threading_1(): |
|
|
@@ -542,32 +532,32 @@ def threading_2():
|
|
|
if task_type == "kaitibaogao": |
|
|
|
t = Thread(target=chat_kaitibaogao, args=(api_key, |
|
|
|
uuid, |
|
|
|
main_parameter |
|
|
|
)) |
|
|
|
main_parameter, |
|
|
|
task_type)) |
|
|
|
t.start() |
|
|
|
elif task_type == "chat_abstract": |
|
|
|
t = Thread(target=chat_abstract_keyword, args=(api_key, |
|
|
|
uuid, |
|
|
|
main_parameter |
|
|
|
)) |
|
|
|
main_parameter, |
|
|
|
task_type)) |
|
|
|
t.start() |
|
|
|
elif task_type == "paper_content": |
|
|
|
t = Thread(target=chat_content, args=(api_key, |
|
|
|
uuid, |
|
|
|
main_parameter |
|
|
|
)) |
|
|
|
main_parameter, |
|
|
|
task_type)) |
|
|
|
t.start() |
|
|
|
elif task_type == "thanks_task": |
|
|
|
t = Thread(target=chat_thanks, args=(api_key, |
|
|
|
uuid, |
|
|
|
main_parameter |
|
|
|
)) |
|
|
|
uuid, |
|
|
|
main_parameter, |
|
|
|
task_type)) |
|
|
|
t.start() |
|
|
|
elif task_type == "references_task": |
|
|
|
t = Thread(target=chat_references, args=(api_key, |
|
|
|
uuid, |
|
|
|
main_parameter |
|
|
|
)) |
|
|
|
uuid, |
|
|
|
main_parameter, |
|
|
|
task_type)) |
|
|
|
t.start() |
|
|
|
else: |
|
|
|
time.sleep(1) |
|
|
@@ -584,36 +574,36 @@ def threading_3():
|
|
|
# "tasking_num": 0, |
|
|
|
if int(values_dict["num_small_task"]) == int(values_dict["tasking_num"]): |
|
|
|
res_end_list.append(key) |
|
|
|
for key in res_end_list: |
|
|
|
redis_.hdel(redis_res, key) |
|
|
|
|
|
|
|
res_str = res_dict[key].decode("utf-8") |
|
|
|
json_str = json.dumps(res_str, indent=4, ensure_ascii=False) |
|
|
|
|
|
|
|
key = str(key, encoding="utf-8") |
|
|
|
uuid_path = os.path.join(project_data_txt_path, key) |
|
|
|
|
|
|
|
os.makedirs(uuid_path) |
|
|
|
|
|
|
|
paper_content_path = os.path.join(uuid_path, "paper_content.json") |
|
|
|
with open(paper_content_path, 'w') as json_file: |
|
|
|
json_file.write(json_str) |
|
|
|
|
|
|
|
""" |
|
|
|
Call the jar package
(placeholder)
|
|
|
|
|
|
|
|
|
|
|
""" |
|
|
|
url_path_paper = "http://104.244.90.248:14000/download?filename_path={}/paper.docx".format(key) |
|
|
|
url_path_kaiti = "http://104.244.90.248:14000/download?filename_path={}/paper_start.docx".format(key) |
|
|
|
return_text = str({"id": key, |
|
|
|
"content_url_path": url_path_paper, |
|
|
|
"content_report_url_path": url_path_kaiti, |
|
|
|
"probabilities": None, |
|
|
|
"status_code": 200}) |
|
|
|
redis_.srem(redis_title_ing, key) |
|
|
|
redis_.set(key, return_text, 28800) |
|
|
|
if res_end_list != []: |
|
|
|
for key in res_end_list: |
|
|
|
redis_.hdel(redis_res, key) |
|
|
|
|
|
|
|
res_str = res_dict[key].decode("utf-8") |
|
|
|
json_str = json.dumps(res_str, indent=4, ensure_ascii=False) |
|
|
|
|
|
|
|
key = str(key, encoding="utf-8") |
|
|
|
uuid_path = os.path.join(config.project_data_txt_path, key) |
|
|
|
|
|
|
|
os.makedirs(uuid_path) |
|
|
|
|
|
|
|
paper_content_path = os.path.join(uuid_path, "paper_content.json") |
|
|
|
with open(paper_content_path, 'w') as json_file: |
|
|
|
json_file.write(json_str) |
|
|
|
|
|
|
|
""" |
|
|
|
Call the jar package
(placeholder)
|
|
|
|
|
|
|
""" |
|
|
|
url_path_paper = paper_download_url.format(key) |
|
|
|
url_path_kaiti = paper_start_download_url.format(key) |
|
|
|
return_text = str({"id": key, |
|
|
|
"content_url_path": url_path_paper, |
|
|
|
"content_report_url_path": url_path_kaiti, |
|
|
|
"probabilities": None, |
|
|
|
"status_code": 200}) |
|
|
|
redis_.srem(redis_title_ing, key) |
|
|
|
redis_.set(key, return_text, 28800) |
|
|
|
|
|
|
|
time.sleep(1) |
|
|
|
|
|
|
@@ -627,15 +617,15 @@ def threading_3():
|
|
|
# redis_.rpush(redis_title, json.dumps({"id": id_, "title": title})) # 加入redis |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.route("/chat", methods=["POST"]) |
|
|
|
def chat(): |
|
|
|
print(request.remote_addr) |
|
|
|
title = request.json["title"] |
|
|
|
id_ = str(uuid.uuid1()) |
|
|
|
print(id_) |
|
|
|
redis_.rpush(redis_title, json.dumps({"id":id_, "title": title})) # 加入redis |
|
|
|
return_text = {"texts": {'id': id_,}, "probabilities": None, "status_code": 200} |
|
|
|
redis_.rpush(redis_title, json.dumps({"id": id_, "title": title},ensure_ascii=False)) # 加入redis |
|
|
|
redis_.persist(redis_key_name_openaikey_list) |
|
|
|
return_text = {"texts": {'id': id_, }, "probabilities": None, "status_code": 200} |
|
|
|
print("ok") |
|
|
|
redis_.sadd(redis_title_ing, id_) |
|
|
|
|
|
|
@@ -653,12 +643,11 @@ def download_file():
|
|
|
# response.headers["Content-Disposition"] = "attachment; filename={}".format(filename.encode().decode('latin-1')) |
|
|
|
filename_path = request.args.get('filename_path', '') |
|
|
|
filename = filename_path.split("/")[1] |
|
|
|
path_name = os.path.join(project_data_txt_path, filename_path) |
|
|
|
path_name = os.path.join(config.project_data_txt_path, filename_path) |
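# Stream the requested file from the project data directory back as a download attachment.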
|
|
|
with open(path_name, 'rb') as f: |
|
|
|
stream = f.read() |
|
|
|
response = Response(stream, content_type='application/octet-stream') |
|
|
|
response.headers['Content-disposition'] = 'attachment; filename={}'.format(filename) |
|
|
|
|
|
|
|
return response |
|
|
|
|
|
|
|
|
|
|
@@ -669,19 +658,22 @@ def search():
|
|
|
if result is not None: |
|
|
|
# redis_.delete(id_) |
|
|
|
# result_dict = result.decode('UTF-8') |
|
|
|
if redis_.hexists(redis_key_name_openaikey_bad_dict, id_) == True: |
|
|
|
result_text = {'code': "204", 'text': "", 'probabilities': None} |
|
|
|
|
|
|
|
result_dict = eval(result) |
|
|
|
# return_text = {"id":query_id, "load_result_path": load_result_path, "probabilities": None, "status_code": 200} |
|
|
|
query_id = result_dict["id"] |
|
|
|
# "content_url_path": url_path_paper, |
|
|
|
# "content_report_url_path": url_path_kaiti, |
|
|
|
content_url_path = result_dict["content_url_path"] |
|
|
|
content_report_url_path = result_dict["content_report_url_path"] |
|
|
|
probabilities = result_dict["probabilities"] |
|
|
|
result_text = {'code': 200, |
|
|
|
'content_url_path': content_url_path, |
|
|
|
'content_report_url_path': content_report_url_path, |
|
|
|
'probabilities': probabilities} |
|
|
|
else: |
|
|
|
result_dict = eval(result) |
|
|
|
# return_text = {"id":query_id, "load_result_path": load_result_path, "probabilities": None, "status_code": 200} |
|
|
|
query_id = result_dict["id"] |
|
|
|
# "content_url_path": url_path_paper, |
|
|
|
# "content_report_url_path": url_path_kaiti, |
|
|
|
content_url_path = result_dict["content_url_path"] |
|
|
|
content_report_url_path = result_dict["content_report_url_path"] |
|
|
|
probabilities = result_dict["probabilities"] |
|
|
|
result_text = {'code': 200, |
|
|
|
'content_url_path': content_url_path, |
|
|
|
'content_report_url_path': content_report_url_path, |
|
|
|
'probabilities': probabilities} |
|
|
|
else: |
|
|
|
querying_list = list(redis_.smembers(redis_title_ing)) |
|
|
|
querying_set = set() |
|
|
@@ -722,7 +714,6 @@ t.start()
|
|
|
t = Thread(target=threading_3) |
|
|
|
t.start() |
|
|
|
|
|
|
|
|
|
|
|
if __name__ == '__main__': |
|
|
|
# main("大型商业建筑人员疏散设计研究") |
|
|
|
app.run(host="0.0.0.0", port=14002, threaded=True, debug=False) |