diff --git a/.idea/ChatGPT_Sever.iml b/.idea/ChatGPT_Sever.iml
index c197a18..fe94f4e 100644
--- a/.idea/ChatGPT_Sever.iml
+++ b/.idea/ChatGPT_Sever.iml
@@ -5,7 +5,7 @@
-
+
diff --git a/.idea/deployment.xml b/.idea/deployment.xml
index 4e11f67..355b4da 100644
--- a/.idea/deployment.xml
+++ b/.idea/deployment.xml
@@ -1,15 +1,15 @@
-
+
-
+
-
+
diff --git a/.idea/misc.xml b/.idea/misc.xml
index d541bf3..944ded8 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/.idea/sshConfigs.xml b/.idea/sshConfigs.xml
index b5ed381..651c67d 100644
--- a/.idea/sshConfigs.xml
+++ b/.idea/sshConfigs.xml
@@ -2,7 +2,7 @@
-
+
\ No newline at end of file
diff --git a/.idea/webServers.xml b/.idea/webServers.xml
index 798e4cd..4fc2e5b 100644
--- a/.idea/webServers.xml
+++ b/.idea/webServers.xml
@@ -2,8 +2,8 @@
+
+
+
@@ -36,16 +61,16 @@
- {
- "keyToString": {
- "RunOnceActivity.OpenProjectViewOnStart": "true",
- "RunOnceActivity.ShowReadmeOnStart": "true",
- "WebServerToolWindowFactoryState": "false",
- "settings.editor.selected.configurable": "editor.preferences.fonts.default"
+
-
-
+}]]>
+
+
@@ -58,7 +83,7 @@
-
+
@@ -67,7 +92,7 @@
-
+
@@ -80,7 +105,7 @@
-
+
@@ -89,20 +114,20 @@
-
+
-
-
-
+
+
+
-
+
@@ -133,7 +158,7 @@
-
+
@@ -146,7 +171,7 @@
-
+
@@ -155,7 +180,7 @@
-
+
@@ -168,7 +193,29 @@
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -199,6 +246,28 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -397,23 +466,87 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
+
-
-
-
+
+
+
+
@@ -438,148 +571,141 @@
-
+
+
+
+
+
+
+ 1680511772109
+
+
+
+ 1680511772109
+
+
+ 1680771803627
+
+
+
+ 1680771803627
+
+
+ 1680835614890
+
+
+
+ 1680835614890
+
+
+
+
+
+
+
+
+
+
+
+
file://$PROJECT_DIR$/flask_serves.py
- 301
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 79
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 114
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 204
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 206
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 122
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 208
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 209
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 197
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 112
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 111
-
-
-
- file://$PROJECT_DIR$/flask_serves.py
- 199
-
+ 96
+
- file://$PROJECT_DIR$/flask_serves.py
- 318
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 531
+
- file://$PROJECT_DIR$/flask_serves.py
- 317
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 522
+
- file://$PROJECT_DIR$/flask_serves.py
- 316
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 518
+
- file://$PROJECT_DIR$/flask_serves.py
- 312
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 536
+
- file://$PROJECT_DIR$/flask_serves.py
- 320
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 537
+
- file://$PROJECT_DIR$/flask_serves.py
- 321
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 100
+
- file://$PROJECT_DIR$/flask_serves.py
- 322
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 85
+
- file://$PROJECT_DIR$/flask_serves.py
- 323
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 115
+
- file://$PROJECT_DIR$/flask_serves.py
- 324
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 188
+
- file://$PROJECT_DIR$/flask_serves.py
- 143
-
+ file://$PROJECT_DIR$/flask_sever_1.py
+ 239
+
-
-
+
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
-
+
+
-
+
\ No newline at end of file
diff --git a/chatGPT账号 26.txt b/chatGPT账号 26.txt
new file mode 100644
index 0000000..25a0b20
--- /dev/null
+++ b/chatGPT账号 26.txt
@@ -0,0 +1,30 @@
+主号
+jhma1234333@gmail.com shabi123* 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 付款账户 :{plus:5319 9345 1099 3434, api:
+1987890321@qq.com zjz1987890321++ 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 付款账户 :{plus:5319 9345 1099 3434 ,api:5319 9345 1099 3434
+1432334894@qq.com H1234567 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 付款账户 :
+33367671@qq.com ly33367671. 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 付款账户 :
+gradymendoza342@gmail.com----qx6kqppvupyg566h----a0fndmcme8y7x3z3@aol.com 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
+romypynysy76433@gmail.com----x87zd8qdq9gg27kw----h4oizvpsykhz0kfn@gmx.com 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
+jesikajoya819@gmail.com----cdekq9gq9q9btz7h----vasf5ofm6txmojis@proton.me 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
+ounrzagrdezi@gmail.com----tkfevgjsbpcc54w0----qflrsz92ebbkot97@gmx.com api-keys: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
+aiyaaiaj27262@gmail.com----m4h48gamzqp48us9----aibmmesoooduwjmo@mail.com api-keys: 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
+homeworkshxjdhd@gmail.com----fjyysmunpoce16w6----yq29o1u911v0vw5n@outlook.com api-keys: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
+hansikamutvani@gmail.com----g794fak5wy9mobqp----vu3gu1ou0nz48u0a@hotmail.com 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
+qatalpend356@gmail.com----xo894ftgachr1huq----pg7gnc4a7l1wly1y@proton.me api-keys: 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
+akhiakther8373@gmail.com----dt6g2tp0qkp8t24f----ys1w8k6j0wo6l144@aol.com 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
+qatalpend356@gmail.com----xo894ftgachr1huq----pg7gnc4a7l1wly1y@proton.me api-keys: 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
+ramtonazkine@gmail.com----qquh8uaabgdznrtg----exsypsq2buqxwbfa@gmx.com api-keys: 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
+davislaird085@gmail.com----cw1fhcde8syffqmy----trre1iri5mhd2v8w@outlook.com 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
+ruiduphfo35644@gmail.com----hbu6458aa44f9s8h----ns237070stg1v5kn@163.com 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
+najminmohonas3@gmail.com----mctpqhe06opspbnt----i9m52jmf3i4ngs6a@mail.com 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
+eobdkxpwpekd@gmail.com----cpqqxxqk0wf34cba----j59ix0ibxwmlzohf@mail.com api-keys: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
+subornasalinia@gmail.com----tn4tved9akum1kx1----ztxqc4kbttzp75om@outlook.com api-keys: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
+peswjfgsdsv122@gmail.com----cwsgftzg85h0npx6----baua3t3317j007tx@gmx.com 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
+yyzhao@entroduction.cn she220877 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
+JacobRios531@outlook.com----bkwga59Kw7----org-IkKh08PJohhA8ISPAu3F1CoH----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
+
+
+副号
+JacobRios531@outlook.com----bkwga59Kw7----org-IkKh08PJohhA8ISPAu3F1CoH----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
+joakrdallizx@gmail.com----b9mbyj3yv5q71fkn----zagztz5lnnwuq9hz@hotmail.com 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
+
diff --git a/chatgpt_post.py b/chatgpt_post.py
new file mode 100644
index 0000000..ca3d5da
--- /dev/null
+++ b/chatgpt_post.py
@@ -0,0 +1,56 @@
+import requests
+import json
+
+# NOTE(review): a live-looking OpenAI secret is committed here — revoke this key
+# and load it from an environment variable instead of the source tree.
+OPENAI_API_KEY = "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"
+
+url = "https://api.openai.com/v1/chat/completions"
+headers = {
+    "Content-Type": "application/json",
+    "Authorization": f"Bearer {OPENAI_API_KEY}"
+}
+data = {
+    "model": "gpt-3.5-turbo",
+    "messages": [
+        {"role": "user", "content": "请帮我改写这句话:在城市发展进程当中,逐渐呈现出一些综合性的大型建筑群。"},
+        {"role": "assistant", "content": "随着城市的发展,综合性大型建筑群正在逐渐出现。"},
+        {"role": "user", "content": "这句话我不满意,再改一下帮我"}
+    ],
+    "temperature": 0.7
+}
+
+response = requests.post(url,
+                         headers=headers,
+                         data=json.dumps(data),
+                         timeout=1000)
+
+res = response.json()
+print(res)
+print(res["choices"][0]["message"]["content"])
+
+
+def _rewrite_with_key(OPENAI_API_KEY):
+    """Salvaged paste: same request as above, parameterised by api key.
+
+    These lines previously sat at module level with a stray indent, so merely
+    importing this file raised IndentationError; wrapping them in a function
+    makes the module importable without changing what the script part does.
+    """
+    url = "https://api.openai.com/v1/chat/completions"
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {OPENAI_API_KEY}"
+    }
+    data = {
+        "model": "gpt-3.5-turbo",
+        "messages": [
+            {"role": "user", "content": "请帮我改写这句话:在城市发展进程当中,逐渐呈现出一些综合性的大型建筑群。"},
+            {"role": "assistant", "content": "随着城市的发展,综合性大型建筑群正在逐渐出现。"},
+            {"role": "user", "content": "这句话我不满意,再改一下帮我"}
+        ],
+        "temperature": 0.7
+    }
+    response = requests.post(url,
+                             headers=headers,
+                             data=json.dumps(data),
+                             timeout=1000)
+    # NOTE(review): redis_ / redis_key_name_openaikey_list are not defined in
+    # this module — calling this function will NameError here. Confirm which
+    # module this paste was copied from.
+    redis_.rpush(redis_key_name_openaikey_list, OPENAI_API_KEY)
+    return response
\ No newline at end of file
diff --git a/flask_serve_2.py b/flask_serve_2.py
new file mode 100644
index 0000000..addff58
--- /dev/null
+++ b/flask_serve_2.py
@@ -0,0 +1,714 @@
+from flask import Flask, jsonify, Response
+from flask import request
+import redis
+import uuid
+import json
+import time
+import threading
+from threading import Thread
+from flask import send_file, send_from_directory
+import os
+from flask import make_response
+import openai
+import base64
+import re
+import urllib.parse as pa
+import socket
+from serve_config_1 import Config
+import requests
+
+config = Config()
+
+app = Flask(__name__)
+app.config["JSON_AS_ASCII"] = False  # keep non-ASCII (Chinese) intact in JSON responses
+
+# Discover this host's outbound IP by opening a UDP socket (no traffic is sent).
+s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+s.connect(("8.8.8.8", 80))
+localhostip = s.getsockname()[0]
+
+lock = threading.RLock()  # guards read-modify-write cycles on the redis_res hash
+pool = redis.ConnectionPool(host=config.reids_ip, port=config.reids_port, max_connections=50, db=config.reids_db,
+                            password=config.reids_password)
+# NOTE(review): decode_responses passed to Redis() is ignored when a pool is
+# supplied — it must be set on the ConnectionPool; hence the .decode() calls below.
+redis_ = redis.Redis(connection_pool=pool, decode_responses=True)
+
+thanks = "致谢"
+references = "参考文献"
+
+# Base URL of this service, used to build the download links returned to clients.
+flask_serves_env = "http://{}:{}".format(localhostip, config.flask_port)
+
+paper_download_url = flask_serves_env + "/download?filename_path={}/paper.docx"
+paper_start_download_url = flask_serves_env + "/download?filename_path={}/paper_start.docx"
+
+# Per-host redis key names: failed (key, task) pairs and the free api-key pool.
+redis_key_name_openaikey_bad_dict = "openaikey_bad_list_{}".format(str(localhostip))
+
+redis_key_name_openaikey_list = "openaikey_list_{}".format(str(localhostip))
+
+redis_title = "redis_title"  # queue of submitted titles awaiting expansion
+
+redis_title_ing = "redis_title_ing"  # set of job ids currently in progress
+
+redis_small_task = "redis_small_task"  # queue of per-section generation tasks
+
+redis_res = "redis_res"  # hash: job id -> accumulating result record
+
+# Seed the api-key pool from config and make the bookkeeping keys non-expiring.
+for i in config.openaikey_list:
+    redis_.rpush(redis_key_name_openaikey_list, i)
+
+redis_.hset(redis_key_name_openaikey_bad_dict, "1", "1")
+redis_.persist(redis_key_name_openaikey_list)
+redis_.persist(redis_key_name_openaikey_bad_dict)
+
+def request_api_chatgpt(api_key, prompt):
+    """Send a single-turn prompt to the OpenAI chat-completions endpoint.
+
+    :param api_key: OpenAI API key used for the Bearer authorization header.
+    :param prompt: user message sent as the only chat turn.
+    :return: the raw ``requests.Response``; callers parse ``.json()`` and
+             treat any failure (non-2xx, bad body) via their own except blocks.
+    """
+    OPENAI_API_KEY = api_key
+    url = "https://api.openai.com/v1/chat/completions"
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {OPENAI_API_KEY}"
+    }
+    data = {
+        "model": "gpt-3.5-turbo",
+        "messages": [
+            {"role": "user", "content": prompt},
+        ],
+        "temperature": 0.5
+    }
+    response = requests.post(url,
+                             headers=headers,
+                             data=json.dumps(data),
+                             timeout=600)  # generation can be slow; generous timeout
+
+    return response
+
+
+def chat_kaitibaogao(api_key, uuid, main_parameter, task_type):
+    """Generate the opening report (开题报告) for a paper title and record it.
+
+    :param api_key: OpenAI key popped from the shared pool by threading_2.
+    :param uuid: job id; keys the result record in the ``redis_res`` hash.
+    :param main_parameter: ``[title]``.
+    :param task_type: small-task tag, stored with the key on failure.
+    """
+    try:
+        response = request_api_chatgpt(api_key, config.kaitibaogao_prompt.format(main_parameter[0]))
+        res = response.json()
+        kaitibaogao = res["choices"][0]["message"]["content"]
+        # Success: return the key to the pool for other workers.
+        redis_.rpush(redis_key_name_openaikey_list, api_key)
+        redis_.persist(redis_key_name_openaikey_list)
+
+    except Exception:  # fixed: bare except also swallowed SystemExit/KeyboardInterrupt
+        """
+        send alert (发送警报)
+        """
+        kaitibaogao = ""
+        # Park the (key, task_type) pair so the failure can be inspected.
+        redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key, task_type)))
+
+    # Merge the result into the shared per-uuid record under the lock.
+    lock.acquire()
+    res_dict_str = redis_.hget(redis_res, uuid)
+    res_dict = json.loads(res_dict_str)
+    res_dict["tasking_num"] += 1
+    print("子任务进度 {}".format(uuid), res_dict["tasking_num"])  # fixed: .format on a string with no placeholder was a no-op
+    res_dict["开题报告"] = kaitibaogao
+    res_dict_str = json.dumps(res_dict, ensure_ascii=False)
+    redis_.hset(redis_res, uuid, res_dict_str)
+    lock.release()
+
+
+def chat_abstract_keyword(api_key, uuid, main_parameter, task_type):
+    """Generate CN/EN abstracts and keywords for a title and merge them into
+    the per-uuid result record.
+
+    :param main_parameter: ``[title]``.
+    :param task_type: small-task tag, stored with the key on failure.
+    """
+    try:
+        # Chinese abstract from the title.
+        response = request_api_chatgpt(api_key, config.chinese_abstract_prompt.format(main_parameter[0]))
+        res = response.json()
+        chinese_abstract = res["choices"][0]["message"]["content"]
+
+        # English abstract: translation of the Chinese one.
+        response = request_api_chatgpt(api_key, config.english_abstract_prompt.format(chinese_abstract))
+        res = response.json()
+        english_abstract = res["choices"][0]["message"]["content"]
+
+        # Chinese keywords extracted from the Chinese abstract.
+        response = request_api_chatgpt(api_key, config.chinese_keyword_prompt.format(chinese_abstract))
+        res = response.json()
+        chinese_keyword = res["choices"][0]["message"]["content"]
+
+        # English keywords: translation of the Chinese keywords.
+        response = request_api_chatgpt(api_key, config.english_keyword_prompt.format(chinese_keyword))
+        res = response.json()
+        english_keyword = res["choices"][0]["message"]["content"]
+
+        redis_.rpush(redis_key_name_openaikey_list, api_key)
+        redis_.persist(redis_key_name_openaikey_list)
+    except Exception:  # fixed: bare except
+        """
+        send alert (发送警报)
+        """
+        chinese_abstract = ""
+        english_abstract = ""
+        chinese_keyword = ""
+        english_keyword = ""
+        redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key, task_type)))
+
+    paper_abstract_keyword = {
+        "中文摘要": chinese_abstract,
+        "英文摘要": english_abstract,
+        "中文关键词": chinese_keyword,
+        "英文关键词": english_keyword
+    }
+
+    # Merge all four fields into the shared per-uuid record under the lock.
+    lock.acquire()
+    res_dict_str = redis_.hget(redis_res, uuid)
+    res_dict = json.loads(res_dict_str)
+    res_dict["tasking_num"] += 1
+    print("子任务进度 {}".format(uuid), res_dict["tasking_num"])  # fixed: no-op .format
+    res_dict["中文摘要"] = paper_abstract_keyword["中文摘要"]
+    res_dict["英文摘要"] = paper_abstract_keyword["英文摘要"]
+    res_dict["中文关键词"] = paper_abstract_keyword["中文关键词"]
+    res_dict["英文关键词"] = paper_abstract_keyword["英文关键词"]
+    res_dict_str = json.dumps(res_dict, ensure_ascii=False)
+    redis_.hset(redis_res, uuid, res_dict_str)
+    lock.release()
+
+
+def chat_content(api_key, uuid, main_parameter, task_type):
+    '''Generate the body text for one table-of-contents entry and store it at
+    its index in the per-uuid ``table_of_contents`` list.
+
+    :param main_parameter: ``[content_index, title, mulu, subtitle, prompt]``.
+    '''
+    content_index = main_parameter[0]
+    title = main_parameter[1]
+    mulu = main_parameter[2]
+    subtitle = main_parameter[3]
+    prompt = main_parameter[4]
+
+    if subtitle[:2] == "@@":
+        # "@@" marks a heading that needs no generated body; strip the tag.
+        res_content = subtitle[2:]
+        # fixed: this branch consumed an api_key from the pool but never
+        # returned it, slowly draining the key list.
+        redis_.rpush(redis_key_name_openaikey_list, api_key)
+        redis_.persist(redis_key_name_openaikey_list)
+    else:
+        try:
+            response = request_api_chatgpt(api_key, prompt.format(title, mulu, subtitle))
+            res = response.json()
+            res_content = res["choices"][0]["message"]["content"]
+
+            redis_.rpush(redis_key_name_openaikey_list, api_key)
+            redis_.persist(redis_key_name_openaikey_list)
+        except Exception:  # fixed: bare except
+            """
+            send alert (发送警报)
+            """
+            res_content = ""
+            redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key, task_type)))
+
+    # Merge the section text into the shared per-uuid record under the lock.
+    lock.acquire()
+    res_dict_str = redis_.hget(redis_res, uuid)
+    res_dict = json.loads(res_dict_str)
+    res_dict["tasking_num"] += 1
+    print("子任务进度 {}".format(uuid), res_dict["tasking_num"])  # fixed: no-op .format
+    table_of_contents = res_dict["table_of_contents"]
+    table_of_contents[content_index] = res_content
+    res_dict["table_of_contents"] = table_of_contents
+    res_dict_str = json.dumps(res_dict, ensure_ascii=False)
+    redis_.hset(redis_res, uuid, res_dict_str)
+    lock.release()
+
+
+def chat_thanks(api_key, uuid, main_parameter, task_type):
+    '''Generate the acknowledgements (致谢) section for a paper.
+
+    :param main_parameter: ``[title, thank_prompt]``.
+    '''
+    title = main_parameter[0]
+    prompt = main_parameter[1]
+
+    try:
+        response = request_api_chatgpt(api_key, prompt.format(title))
+        res = response.json()
+        res_content = res["choices"][0]["message"]["content"]
+
+        redis_.rpush(redis_key_name_openaikey_list, api_key)
+        redis_.persist(redis_key_name_openaikey_list)
+    except Exception:  # fixed: bare except
+        """
+        send alert (发送警报)
+        """
+        res_content = ""
+        redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key, task_type)))
+
+    # Merge into the shared per-uuid record under the lock.
+    lock.acquire()
+    res_dict_str = redis_.hget(redis_res, uuid)
+    res_dict = json.loads(res_dict_str)
+    res_dict["tasking_num"] += 1
+    print("子任务进度 {}".format(uuid), res_dict["tasking_num"])  # fixed: no-op .format
+    res_dict["致谢"] = res_content
+    res_dict_str = json.dumps(res_dict, ensure_ascii=False)
+    redis_.hset(redis_res, uuid, res_dict_str)
+    lock.release()
+
+
+def chat_references(api_key, uuid, main_parameter, task_type):
+    '''Generate the references (参考文献) section for a paper.
+
+    :param main_parameter: ``[title, mulu, references_prompt]``.
+    '''
+    title = main_parameter[0]
+    mulu = main_parameter[1]
+    prompt = main_parameter[2]
+    try:
+        response = request_api_chatgpt(api_key, prompt.format(title, mulu))
+        res = response.json()
+        res_content = res["choices"][0]["message"]["content"]
+
+        redis_.rpush(redis_key_name_openaikey_list, api_key)
+        redis_.persist(redis_key_name_openaikey_list)
+    except Exception:  # fixed: bare except
+        """
+        send alert (发送警报)
+        """
+        res_content = ""
+        redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key, task_type)))
+
+    # Merge into the shared per-uuid record under the lock.
+    lock.acquire()
+    res_dict_str = redis_.hget(redis_res, uuid)
+    res_dict = json.loads(res_dict_str)
+    res_dict["tasking_num"] += 1
+    print("子任务进度 {}".format(uuid), res_dict["tasking_num"])  # fixed: no-op .format
+    res_dict["参考文献"] = res_content
+    res_dict_str = json.dumps(res_dict, ensure_ascii=False)
+    redis_.hset(redis_res, uuid, res_dict_str)
+    lock.release()
+
+
+def threading_mulu(api_key, title, uuid):
+    '''
+    Generate the table of contents (目录) for *title*, split it into small
+    tasks, push them onto the ``redis_small_task`` queue, and create the
+    per-uuid result record in the ``redis_res`` hash.
+    :return:
+    '''
+    try:
+        response = request_api_chatgpt(api_key, config.mulu_prompt.format(title))
+        res = response.json()
+        mulu = res["choices"][0]["message"]["content"]
+
+        redis_.rpush(redis_key_name_openaikey_list, api_key)
+        redis_.persist(redis_key_name_openaikey_list)
+    except Exception:  # fixed: bare except
+        """
+        send alert (发送警报)
+        """
+        # fixed: str(api_key, "mulu") raised TypeError (str() with a bogus
+        # encoding argument on a str); the intent was the tuple's repr.
+        redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key, "mulu")))
+        mulu = ""
+
+    try:
+        mulu_list = str(mulu).split("\n")
+        mulu_list = [i.strip() for i in mulu_list if i != ""]
+
+        print(mulu_list)
+
+        # Walk the outline: headings matching the numbered-heading patterns are
+        # tagged "@@" (no body generated); lines before the second-level heading
+        # appears (cun_bool False) are skipped.
+        cun_bool = False
+        table_of_contents = [mulu_list[0]]
+
+        for i in mulu_list[1:]:
+            result_second_biaoti_list = re.findall(config.pantten_second_biaoti, i)
+            result_other_biaoti_list = re.findall(config.pantten_other_biaoti, i)
+            if result_second_biaoti_list != []:
+                table_of_contents.append("@@" + i)
+                cun_bool = True
+                continue
+            if cun_bool == False:
+                continue
+            else:
+                if result_other_biaoti_list != []:
+                    table_of_contents.append("@@" + i)
+                else:
+                    table_of_contents.append(i)
+
+        print(table_of_contents)
+
+        # Drop trailing 致谢/参考文献 entries: they are produced by their own
+        # dedicated tasks below, not by per-heading content tasks.
+        thanks_references_bool_table = table_of_contents[-3:]
+
+        if references in thanks_references_bool_table:
+            table_of_contents.remove(references)
+
+        if thanks in thanks_references_bool_table:
+            table_of_contents.remove(thanks)
+
+        print(len(table_of_contents))
+
+        small_task_list = []
+        kaitibaogao_task = {
+            "task_type": "kaitibaogao",
+            "uuid": uuid,
+            "main_parameter": [title]
+        }
+
+        chat_abstract_task = {
+            "task_type": "chat_abstract",
+            "uuid": uuid,
+            "main_parameter": [title]
+        }
+        small_task_list.append(kaitibaogao_task)
+        small_task_list.append(chat_abstract_task)
+        # One paper_content task per outline entry; prompt varies by position.
+        content_index = 0
+        while True:
+            if content_index == len(table_of_contents):
+                break
+            subtitle = table_of_contents[content_index]
+            if content_index == 0:
+                prompt = config.first_title_prompt
+            elif subtitle == "参考文献":
+                prompt = config.references_prompt
+            elif subtitle == "致谢":
+                prompt = config.thank_prompt
+            else:
+                prompt = config.small_title_prompt
+            print("请求的所有参数",
+                  content_index,
+                  title,
+                  subtitle,
+                  prompt)
+
+            paper_content = {
+                "task_type": "paper_content",
+                "uuid": uuid,
+                "main_parameter": [
+                    content_index,
+                    title,
+                    mulu,
+                    subtitle,
+                    prompt
+                ]
+            }
+
+            small_task_list.append(paper_content)
+            content_index += 1
+
+        thanks_task = {
+            "task_type": "thanks_task",
+            "uuid": uuid,
+            "main_parameter": [
+                title,
+                config.thank_prompt
+            ]
+        }
+
+        references_task = {
+            "task_type": "references_task",
+            "uuid": uuid,
+            "main_parameter": [
+                title,
+                mulu,
+                config.references_prompt
+            ]
+        }
+
+        small_task_list.append(thanks_task)
+        small_task_list.append(references_task)
+
+        for small_task in small_task_list:
+            small_task = json.dumps(small_task, ensure_ascii=False)
+            redis_.rpush(redis_small_task, small_task)
+            redis_.persist(redis_small_task)  # fixed: persisted the api-key list instead of the task queue
+
+        # Seed the result record consumed/updated by the chat_* workers.
+        res = {
+            "uuid": uuid,
+            "num_small_task": len(small_task_list),
+            "tasking_num": 0,
+            "标题": title,
+            "目录": mulu,
+            "开题报告": "",
+            "任务书": "",
+            "中文摘要": "",
+            "英文摘要": "",
+            "中文关键词": "",
+            "英文关键词": "",
+            "正文": "",
+            "致谢": "",
+            "参考文献": "",
+            "table_of_contents": [""] * len(table_of_contents)
+        }
+
+        res = json.dumps(res, ensure_ascii=False)
+        redis_.hset(redis_res, uuid, res)
+    except Exception:  # fixed: bare except
+        print("目录程序错误")
+
+
+def threading_1():
+    # title, redis_key_name_openaikey_list
+    '''
+    Dispatcher loop: generate the outline for the next queued title.
+    Waits until the small-task queue has drained, then pops one title and one
+    free api key and hands them to ``threading_mulu`` on a new thread.
+    :param title:
+    :param redis_key_name_openaikey_list:
+    :return:
+    '''
+    while True:
+        if redis_.llen(redis_small_task) != 0:  # skip while the small-task queue still has work
+            time.sleep(1)
+            continue
+        elif redis_.llen(redis_title) != 0 and redis_.llen(redis_key_name_openaikey_list) != 0:
+            title_uuid_dict_str = redis_.lpop(redis_title).decode('UTF-8')
+            api_key = redis_.lpop(redis_key_name_openaikey_list).decode('UTF-8')
+            # redis_title entries look like {"id": id_, "title": title}
+            title_uuid_dict = json.loads(title_uuid_dict_str)
+
+            title = title_uuid_dict["title"]
+            uuid_id = title_uuid_dict["id"]
+
+            t = Thread(target=threading_mulu, args=(api_key,
+                                                    title,
+                                                    uuid_id,
+                                                    ))
+            t.start()
+        else:
+            time.sleep(1)
+            continue
+
+
+def threading_2():
+    '''
+    Worker loop: consume small tasks in order, pairing each with a free api
+    key and fanning it out to the matching chat_* handler on its own thread.
+    :return:
+    '''
+    while True:
+        if redis_.llen(redis_small_task) != 0 and redis_.llen(redis_key_name_openaikey_list) != 0:
+            # run one small task
+            api_key = redis_.lpop(redis_key_name_openaikey_list).decode('UTF-8')
+            small_title = redis_.lpop(redis_small_task).decode('UTF-8')
+            small_title = json.loads(small_title)
+            task_type = small_title["task_type"]
+            uuid = small_title["uuid"]
+            main_parameter = small_title["main_parameter"]
+
+            # Task payload shape: {"task_type": ..., "uuid": ..., "main_parameter": [...]}
+            # task_type is one of: kaitibaogao / chat_abstract / paper_content /
+            # thanks_task / references_task (produced by threading_mulu).
+            if task_type == "kaitibaogao":
+                t = Thread(target=chat_kaitibaogao, args=(api_key,
+                                                          uuid,
+                                                          main_parameter,
+                                                          task_type))
+                t.start()
+            elif task_type == "chat_abstract":
+                t = Thread(target=chat_abstract_keyword, args=(api_key,
+                                                               uuid,
+                                                               main_parameter,
+                                                               task_type))
+                t.start()
+            elif task_type == "paper_content":
+                t = Thread(target=chat_content, args=(api_key,
+                                                      uuid,
+                                                      main_parameter,
+                                                      task_type))
+                t.start()
+            elif task_type == "thanks_task":
+                t = Thread(target=chat_thanks, args=(api_key,
+                                                     uuid,
+                                                     main_parameter,
+                                                     task_type))
+                t.start()
+            elif task_type == "references_task":
+                t = Thread(target=chat_references, args=(api_key,
+                                                         uuid,
+                                                         main_parameter,
+                                                         task_type))
+                t.start()
+        else:
+            time.sleep(1)
+            continue
+
+
+def threading_3():
+    """Completion watcher: once every small task of a uuid has reported in
+    (tasking_num == num_small_task), dump its record to disk, render the word
+    documents (placeholder), and publish the download URLs under the uuid key."""
+    while True:
+        res_end_list = []
+        res_dict = redis_.hgetall(redis_res)
+        for key, values in res_dict.items():
+            values_dict = json.loads(values)
+            if int(values_dict["num_small_task"]) == int(values_dict["tasking_num"]):
+                res_end_list.append(key)
+        if res_end_list != []:
+            for key in res_end_list:
+                redis_.hdel(redis_res, key)
+
+                res_str = res_dict[key].decode("utf-8")
+                # fixed: json.dumps() was applied to the already-serialized
+                # string, writing a quoted JSON *string* instead of an object;
+                # round-trip through loads to pretty-print the real object.
+                json_str = json.dumps(json.loads(res_str), indent=4, ensure_ascii=False)
+
+                key = str(key, encoding="utf-8")
+                uuid_path = os.path.join(config.project_data_txt_path, key)
+
+                os.makedirs(uuid_path, exist_ok=True)  # fixed: re-processing a uuid crashed on FileExistsError
+
+                paper_content_path = os.path.join(uuid_path, "paper_content.json")
+                with open(paper_content_path, 'w') as json_file:
+                    json_file.write(json_str)
+
+                """
+                call the word-rendering jar (调用jar包) — placeholder (占位)
+                """
+                url_path_paper = paper_download_url.format(key)
+                url_path_kaiti = paper_start_download_url.format(key)
+                return_text = str({"id": key,
+                                   "content_url_path": url_path_paper,
+                                   "content_report_url_path": url_path_kaiti,
+                                   "probabilities": None,
+                                   "status_code": 200})
+                redis_.srem(redis_title_ing, key)
+                redis_.set(key, return_text, 28800)  # result readable by /search for 8 hours
+
+        time.sleep(1)
+
+
+# def main(title):
+# # print(request.remote_addr)
+# # title = request.json["title"]
+#
+# id_ = str(uuid.uuid1())
+# print(id_)
+# redis_.rpush(redis_title, json.dumps({"id": id_, "title": title})) # 加入redis
+
+
+@app.route("/chat", methods=["POST"])
+def chat():
+    """Enqueue a paper-generation job for the posted title and return its id."""
+    print(request.remote_addr)
+    title = request.json["title"]
+    id_ = str(uuid.uuid1())
+    print(id_)
+    redis_.rpush(redis_title, json.dumps({"id": id_, "title": title}, ensure_ascii=False))  # enqueue for threading_1
+    redis_.persist(redis_key_name_openaikey_list)
+    return_text = {"texts": {'id': id_, }, "probabilities": None, "status_code": 200}
+    print("ok")
+    redis_.sadd(redis_title_ing, id_)  # mark as in-progress so /search can report 201/202
+
+    return jsonify(return_text)  # return the job id to the caller
+
+
+@app.route("/download", methods=['GET'])
+def download_file():
+    """Stream a generated .docx back to the caller.
+
+    ``filename_path`` is "<uuid>/<file>.docx" exactly as embedded in the URLs
+    published by threading_3.
+    """
+    filename_path = request.args.get('filename_path', '')
+    # fixed: filename_path comes straight from the query string and was joined
+    # into a filesystem path unchecked (path traversal via "..", absolute
+    # paths, or extra separators); also split("/")[1] raised IndexError when
+    # no "/" was present.  Reject anything that is not "<segment>/<segment>".
+    parts = filename_path.split("/")
+    if len(parts) != 2 or "" in parts or ".." in parts:
+        return Response("bad filename_path", status=400)
+    filename = parts[1]
+    path_name = os.path.join(config.project_data_txt_path, filename_path)
+    with open(path_name, 'rb') as f:
+        stream = f.read()
+    response = Response(stream, content_type='application/octet-stream')
+    response.headers['Content-disposition'] = 'attachment; filename={}'.format(filename)
+    return response
+
+
+@app.route("/search", methods=["POST"])
+def search():
+    """Poll a job id. Returns download URLs when done, else a status code:
+    200 done, 201 queued and in progress, 202 in progress only, 203 unknown,
+    204 failed (job id found in the bad-key hash)."""
+    id_ = request.json['id']  # job id handed out by /chat
+    result = redis_.get(id_)  # finished-result blob written by threading_3
+    if result is not None:
+        # redis_.delete(id_)
+        # result_dict = result.decode('UTF-8')
+        if redis_.hexists(redis_key_name_openaikey_bad_dict, id_) == True:
+            result_text = {'code': "204", 'text': "", 'probabilities': None}
+
+        else:
+            # NOTE(review): the blob was written with str(dict) and is parsed
+            # with eval here — confirm this redis db is never attacker-writable.
+            result_dict = eval(result)
+            query_id = result_dict["id"]
+            content_url_path = result_dict["content_url_path"]
+            content_report_url_path = result_dict["content_report_url_path"]
+            probabilities = result_dict["probabilities"]
+            result_text = {'code': 200,
+                           'content_url_path': content_url_path,
+                           'content_report_url_path': content_report_url_path,
+                           'probabilities': probabilities}
+    else:
+        # Not finished: decide whether the id is in progress, still queued, or unknown.
+        querying_list = list(redis_.smembers(redis_title_ing))
+        querying_set = set()
+        for i in querying_list:
+            querying_set.add(i.decode())
+
+        querying_bool = False
+        if id_ in querying_set:
+            querying_bool = True
+
+        query_list_json = redis_.lrange(redis_title, 0, -1)
+        query_set_ids = set()
+        for i in query_list_json:
+            data_dict = json.loads(i)
+            query_id = data_dict['id']
+            query_set_ids.add(query_id)
+
+        query_bool = False
+        if id_ in query_set_ids:
+            query_bool = True
+
+        if querying_bool == True and query_bool == True:
+            result_text = {'code': "201", 'text': "", 'probabilities': None}
+        elif querying_bool == True and query_bool == False:
+            result_text = {'code': "202", 'text': "", 'probabilities': None}
+        else:
+            result_text = {'code': "203", 'text': "", 'probabilities': None}
+    return jsonify(result_text)  # return status/result to caller
+
+
+# threading_1  # pulls titles and expands each into small tasks
+# threading_2  # consumes small tasks and produces results
+# threading_3  # watches results; writes json/word files when a paper completes
+t = Thread(target=threading_1)
+t.start()
+t = Thread(target=threading_2)
+t.start()
+# fixed: threading_3 was defined but never started, so finished papers were
+# never flushed out of redis_res and /search could never return code 200.
+t = Thread(target=threading_3)
+t.start()
+
+if __name__ == '__main__':
+    # main("大型商业建筑人员疏散设计研究")
+    app.run(host="0.0.0.0", port=14002, threaded=True, debug=False)
\ No newline at end of file
diff --git a/flask_serves.py b/flask_serves.py
index 74f883e..587b34f 100644
--- a/flask_serves.py
+++ b/flask_serves.py
@@ -26,7 +26,7 @@ import re
import urllib.parse as pa
-pool = redis.ConnectionPool(host='localhost', port=6379, max_connections=50, db=1)
+pool = redis.ConnectionPool(host='localhost', port=63179, max_connections=50, db=1, password='Zhicheng123*')
redis_ = redis.Redis(connection_pool=pool, decode_responses=True)
db_key_query = 'query'
@@ -40,12 +40,16 @@ import logging
lock = threading.RLock()
mulu_prompt = "请帮我根据题目为“{}”生成一个论文目录"
-first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容续写完整,保证续写内容不少于1000字"
-small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容续写完整,保证续写内容不少于1000字"
-references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成中文的{}"
+first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容续写完整,保证续写内容不少于800字"
+small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容续写完整,保证续写内容不少于800字"
+references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成15篇中文的{},要求其中有有中文参考文献不低于12篇,英文参考文献不低于2篇"
thank_prompt = "论文题目是“{}”,目录是“{}”,请把其中的{}部分续写完整"
kaitibaogao_prompt = "请以《{}》为题目生成研究的主要的内容、背景、目的、意义,要求不少于1500字"
-
+chinese_abstract_prompt = "请以《{}》为题目生成论文摘要,要求不少于500字"
+english_abstract_prompt = "请把“{}”这段文字翻译成英文"
+chinese_keyword_prompt = "请为“{}”这段论文摘要生成3-5个关键字"
+english_keyword_prompt = "请把“{}”这几个关键字翻译成英文"
+thanks = "致谢"
dabiaoti = ["二","三","四","五","六","七","八","九"]
# 正则
@@ -57,7 +61,14 @@ project_data_txt_path = "/home/majiahui/ChatGPT_Sever/new_data_txt"
api_key_list = ["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",
"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",
"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",
- "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"]
+ "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",
+ "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",
+ "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",
+ "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",
+ "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",
+ "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"]
+
+# "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"
def chat_title(title, api_key):
global lock
@@ -82,6 +93,7 @@ def chat_title(title, api_key):
def chat_kaitibaogao(title, api_key, uuid_path):
global lock
+ # time.sleep(1)
openai.api_key = api_key
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
@@ -148,7 +160,10 @@ def classify(): # 调用模型,设置最大batch_size
# 生成开题报告
# title, api_key, uuid_path
+
+ lock.acquire()
api_key = api_key_list.pop()
+ lock.release()
t = Thread(target=chat_kaitibaogao, args=(title,
api_key,
uuid_path,
@@ -198,15 +213,26 @@ def classify(): # 调用模型,设置最大batch_size
print(table_of_contents)
# table_of_contents = table_of_contents[:3] + table_of_contents[-1:]
# print(table_of_contents)
+
+
+ thanks_bool_table = table_of_contents[-3:]
+ if thanks not in thanks_bool_table:
+ table_of_contents.insert(-1, "致谢")
+
chat_class = GeneratePaper(mulu_list, table_of_contents)
print(len(table_of_contents))
+
+
############################################################
while True:
if api_key_list == []:
+ time.sleep(1)
continue
if index == len(table_of_contents):
break
+ lock.acquire()
api_key = api_key_list.pop()
+ lock.release()
subtitle = table_of_contents[index]
if index == 0:
prompt = first_title_prompt
@@ -215,18 +241,17 @@ def classify(): # 调用模型,设置最大batch_size
elif subtitle == "致谢":
prompt = thank_prompt
else:
- prompt = first_title_prompt
+ prompt = small_title_prompt
print("请求的所有参数", api_key,
index,
title,
- mulu_list,
subtitle,
prompt)
t = Thread(target=chat_class.chat_content_, args=(api_key,
index,
title,
- mulu_list,
+ mulu,
subtitle,
prompt))
t.start()
@@ -278,8 +303,8 @@ def classify(): # 调用模型,设置最大batch_size
os.system("java -Dfile.encoding=UTF-8 -jar '/home/majiahui/ChatGPT_Sever/createAiXieZuoKaitiWord.jar' '{}' '{}'".format(
kaitibaogao_txt_path, save_word_paper_start))
- url_path_paper = "http://104.244.90.248:14000/download?filename_path={}/paper.docx".format(query_id)
- url_path_kaiti = "http://104.244.90.248:14000/download?filename_path={}/paper_start.docx".format(query_id)
+ url_path_paper = "http://104.244.89.190:14000/download?filename_path={}/paper.docx".format(query_id)
+ url_path_kaiti = "http://104.244.89.190:14000/download?filename_path={}/paper_start.docx".format(query_id)
# content_path = os.path.join(uuid_path, "content.txt")
# load_result_path = res_path.format(query_id)
# load_result_path = os.path.abspath(load_result_path)
@@ -378,8 +403,8 @@ def search():
result_text = {'code': "203", 'text': "", 'probabilities': None}
return jsonify(result_text) # 返回结果
-t = Thread(target=classify)
-t.start()
+t1 = Thread(target=classify)
+t1.start()
if __name__ == "__main__":
app.run(host="0.0.0.0", port=14000, threaded=True, debug=False)
\ No newline at end of file
diff --git a/flask_sever_1.py b/flask_sever_1.py
index 33ee894..873a7ec 100644
--- a/flask_sever_1.py
+++ b/flask_sever_1.py
@@ -14,6 +14,11 @@ import base64
import re
import urllib.parse as pa
import socket
+from serve_config_1 import Config
+import requests
+
+
+config = Config()
app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False
@@ -23,71 +28,18 @@ s.connect(("8.8.8.8", 80))
localhostip = s.getsockname()[0]
lock = threading.RLock()
-pool = redis.ConnectionPool(host='localhost', port=63179, max_connections=50, db=2, password='Zhicheng123*')
+pool = redis.ConnectionPool(host=config.reids_ip, port=config.reids_port, max_connections=50, db=config.reids_db, password=config.reids_password)
redis_ = redis.Redis(connection_pool=pool, decode_responses=True)
-pantten_second_biaoti = '[2二ⅡⅠ][、.]\s{0,}?[\u4e00-\u9fa5]+'
-pantten_other_biaoti = '[2-9二三四五六七八九ⅡⅢⅣⅤⅥⅦⅧⅨ][、.]\s{0,}?[\u4e00-\u9fa5]+'
-
-mulu_prompt = "请帮我根据题目为“{}”生成一个论文目录"
-first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容续写完整,保证续写内容不少于800字"
-small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容续写完整,保证续写内容不少于800字"
-references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成15篇左右的参考文献,要求其中有有中文参考文献不低于12篇,英文参考文献不低于2篇"
-thank_prompt = "请以“{}”为题写一篇论文的致谢"
-kaitibaogao_prompt = "请以《{}》为题目生成研究的主要的内容、背景、目的、意义,要求不少于100字"
-chinese_abstract_prompt = "请以《{}》为题目生成论文摘要,要求不少于1500字"
-english_abstract_prompt = "请把“{}”这段文字翻译成英文"
-chinese_keyword_prompt = "请为“{}”这段论文摘要生成3-5个关键字"
-english_keyword_prompt = "请把“{}”这几个关键字翻译成英文"
thanks = "致谢"
references = "参考文献"
-dabiaoti = ["二", "三", "四", "五", "六", "七", "八", "九"]
-project_data_txt_path = "/home/majiahui/ChatGPT_Sever/new_data_txt"
-
-"""
-key_list = [
- {"ip": key-api},
- {"ip": key-api},
- {"ip": key-api},
- ]
-redis_title = []
-redis_title_ing = []
-redis_small_task = [
- {
- uuid,
- api_key,
- mulu_title_id,
- title,
- mulu,
- subtitle,
- prompt
- }
- ]
-redis_res = [
- {
- "uuid":
- "完成进度":
- "标题":
- "中文摘要":"",
- "英文摘要"
- "中文关键字"
- "英文关键字"
- "正文" : [""] * len(content)
- }
- ] -
-
- > list()
-"""
-
-openaikey_list = ["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",
- "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",
- "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",
- "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",
- "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",
- "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",
- "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",
- "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",
- "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"]
+
+flask_serves_env = "http://{}:{}".format(localhostip,config.flask_port)
+
+paper_download_url = flask_serves_env + "/download?filename_path={}/paper.docx"
+paper_start_download_url = flask_serves_env + "/download?filename_path={}/paper_start.docx"
+
+redis_key_name_openaikey_bad_dict = "openaikey_bad_list_{}".format(str(localhostip))
redis_key_name_openaikey_list = "openaikey_list_{}".format(str(localhostip))
@@ -99,82 +51,102 @@ redis_small_task = "redis_small_task"
redis_res = "redis_res"
-for i in openaikey_list:
+for i in config.openaikey_list:
redis_.rpush(redis_key_name_openaikey_list, i)
+redis_.hset(redis_key_name_openaikey_bad_dict, "1", "1")
+redis_.persist(redis_key_name_openaikey_list)
+redis_.persist(redis_key_name_openaikey_bad_dict)
-def chat_kaitibaogao(api_key, uuid, main_parameter):
- # t = Thread(target=chat_kaitibaogao, args=(api_key,
- # uuid,
- # main_parameter
- # time.sleep(1)
- openai.api_key = api_key
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": kaitibaogao_prompt.format(main_parameter[0])},
+
+def request_api_chatgpt(api_key, prompt):
+ OPENAI_API_KEY = api_key
+ url = "https://api.openai.com/v1/chat/completions"
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {OPENAI_API_KEY}"
+ }
+ data = {
+ "model": "gpt-3.5-turbo",
+ "messages": [
+ {"role": "user", "content": prompt},
],
- temperature=0.5
- )
- kaitibaogao = res.choices[0].message.content
- # kaitibaogao_path = os.path.join(, "kaitibaogao.txt")
- # with open(kaitibaogao_path, 'w', encoding='utf8') as f_kaitibaogao:
- # f_kaitibaogao.write(kaitibaogao)
-
- redis_.rpush(redis_key_name_openaikey_list, api_key)
+ "temperature": 0.5
+ }
+ response = requests.post(url,
+ headers=headers,
+ data=json.dumps(data),
+ timeout=600)
+
+ return response
+
+def chat_kaitibaogao(api_key, uuid, main_parameter,task_type):
+
+ try:
+ response =request_api_chatgpt(api_key, config.kaitibaogao_prompt.format(main_parameter[0]))
+ res = response.json()
+ kaitibaogao = res["choices"][0]["message"]["content"]
+ redis_.rpush(redis_key_name_openaikey_list, api_key)
+ redis_.persist(redis_key_name_openaikey_list)
+
+ except:
+ """
+ 发送警报
+ """
+ kaitibaogao = ""
+ redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type)))
+
+
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
+ print("子任务进度".format(uuid),res_dict["tasking_num"])
res_dict["开题报告"] = kaitibaogao
res_dict_str = json.dumps(res_dict, ensure_ascii=False)
redis_.hset(redis_res, uuid, res_dict_str)
lock.release()
-def chat_abstract_keyword(api_key, uuid, main_parameter):
- # api_key,
- # uuid,
- # main_parameter
- # time.sleep(7)
- openai.api_key = api_key
- # 生成中文摘要
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": chinese_abstract_prompt.format(main_parameter[0])},
- ],
- temperature=0.5
- )
- chinese_abstract = res.choices[0].message.content
- # 生成英文的摘要
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": english_abstract_prompt.format(chinese_abstract)},
- ],
- temperature=0.5
- )
- english_abstract = res.choices[0].message.content
- # 生成中文关键字
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": chinese_keyword_prompt.format(chinese_abstract)},
- ],
- temperature=0.5
- )
- chinese_keyword = res.choices[0].message.content
- # 生成英文关键字
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": english_keyword_prompt.format(chinese_keyword)},
- ],
- temperature=0.5
- )
+def chat_abstract_keyword(api_key, uuid, main_parameter, task_type):
+
+ try:
+ # 生成中文摘要
+
+ response =request_api_chatgpt(api_key, config.chinese_abstract_prompt.format(main_parameter[0]))
+ res = response.json()
+ chinese_abstract = res["choices"][0]["message"]["content"]
+
+ # 生成英文的摘要
+
+ response = request_api_chatgpt(api_key, config.english_abstract_prompt.format(chinese_abstract))
+ res = response.json()
+ english_abstract = res["choices"][0]["message"]["content"]
- english_keyword = res.choices[0].message.content
+
+ # 生成中文关键字
+
+ response = request_api_chatgpt(api_key, config.chinese_keyword_prompt.format(chinese_abstract))
+ res = response.json()
+ chinese_keyword = res["choices"][0]["message"]["content"]
+
+
+ # 生成英文关键字
+ response = request_api_chatgpt(api_key, config.english_keyword_prompt.format(chinese_keyword))
+ res = response.json()
+ english_keyword = res["choices"][0]["message"]["content"]
+
+ redis_.rpush(redis_key_name_openaikey_list, api_key)
+ redis_.persist(redis_key_name_openaikey_list)
+ except:
+ """
+ 发送警报
+ """
+ chinese_abstract = ""
+ english_abstract = ""
+ chinese_keyword = ""
+ english_keyword = ""
+ redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type)))
paper_abstract_keyword = {
"中文摘要": chinese_abstract,
@@ -191,11 +163,12 @@ def chat_abstract_keyword(api_key, uuid, main_parameter):
# lock.acquire()
# api_key_list.append(api_key)
# lock.release()
- redis_.rpush(redis_key_name_openaikey_list, api_key)
+
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
+ print("子任务进度".format(uuid),res_dict["tasking_num"])
res_dict["中文摘要"] = paper_abstract_keyword["中文摘要"]
res_dict["英文摘要"] = paper_abstract_keyword["英文摘要"]
res_dict["中文关键词"] = paper_abstract_keyword["中文关键词"]
@@ -205,7 +178,7 @@ def chat_abstract_keyword(api_key, uuid, main_parameter):
lock.release()
-def chat_content(api_key, uuid, main_parameter):
+def chat_content(api_key, uuid, main_parameter, task_type):
'''
:param api_key:
@@ -222,20 +195,26 @@ def chat_content(api_key, uuid, main_parameter):
if subtitle[:2] == "@@":
res_content = subtitle[2:]
else:
- openai.api_key = api_key
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": prompt.format(title, mulu, subtitle)},
- ],
- temperature=0.5
- )
- res_content = res.choices[0].message.content
- redis_.rpush(redis_key_name_openaikey_list, api_key)
+ try:
+
+ response = request_api_chatgpt(api_key, prompt.format(title, mulu, subtitle))
+ res = response.json()
+ res_content = res["choices"][0]["message"]["content"]
+
+ redis_.rpush(redis_key_name_openaikey_list, api_key)
+ redis_.persist(redis_key_name_openaikey_list)
+ except:
+ """
+ 发送警报
+ """
+ res_content = ""
+ redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type)))
+
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
+ print("子任务进度".format(uuid), res_dict["tasking_num"])
table_of_contents = res_dict["table_of_contents"]
table_of_contents[content_index] = res_content
res_dict["table_of_contents"] = table_of_contents
@@ -244,7 +223,7 @@ def chat_content(api_key, uuid, main_parameter):
lock.release()
-def chat_thanks(api_key, uuid, main_parameter):
+def chat_thanks(api_key, uuid, main_parameter, task_type):
'''
:param api_key:
@@ -257,31 +236,32 @@ def chat_thanks(api_key, uuid, main_parameter):
title = main_parameter[0]
prompt = main_parameter[1]
- openai.api_key = api_key
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": prompt.format(title)},
- ],
- temperature=0.5
- )
- res_content = res.choices[0].message.content
- redis_.rpush(redis_key_name_openaikey_list, api_key)
-
- # "致谢": "",
- # "参考文献": "",
- # 加锁 读取redis生成致谢并存储
+ try:
+ response = request_api_chatgpt(api_key, prompt.format(title))
+ res = response.json()
+ res_content = res["choices"][0]["message"]["content"]
+
+ redis_.rpush(redis_key_name_openaikey_list, api_key)
+ redis_.persist(redis_key_name_openaikey_list)
+ except:
+ """
+ 发送警报
+ """
+ res_content = ""
+ redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type)))
+
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
+ print("子任务进度".format(uuid), res_dict["tasking_num"])
res_dict["致谢"] = res_content
res_dict_str = json.dumps(res_dict, ensure_ascii=False)
redis_.hset(redis_res, uuid, res_dict_str)
lock.release()
-def chat_references(api_key, uuid, main_parameter):
+def chat_references(api_key, uuid, main_parameter, task_type):
'''
:param api_key:
@@ -295,195 +275,205 @@ def chat_references(api_key, uuid, main_parameter):
title = main_parameter[0]
mulu = main_parameter[1]
prompt = main_parameter[2]
+ try:
+ response = request_api_chatgpt(api_key, prompt.format(title, mulu))
+ res = response.json()
+ res_content = res["choices"][0]["message"]["content"]
- openai.api_key = api_key
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": prompt.format(title, mulu)},
- ],
- temperature=0.5
- )
- res_content = res.choices[0].message.content
- redis_.rpush(redis_key_name_openaikey_list, api_key)
+ redis_.rpush(redis_key_name_openaikey_list, api_key)
+ redis_.persist(redis_key_name_openaikey_list)
+ except:
+ """
+ 发送警报
+ """
+ res_content = ""
+ redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str((api_key,task_type)))
# 加锁 读取resis并存储结果
lock.acquire()
res_dict_str = redis_.hget(redis_res, uuid)
res_dict = json.loads(res_dict_str)
res_dict["tasking_num"] += 1
+ print("子任务进度".format(uuid), res_dict["tasking_num"])
res_dict["参考文献"] = res_content
res_dict_str = json.dumps(res_dict, ensure_ascii=False)
redis_.hset(redis_res, uuid, res_dict_str)
lock.release()
-def threading_mulu(key_api, title, uuid):
+def threading_mulu(api_key, title, uuid):
'''
生成目录并吧任务拆解进入子任务的redis_list中和储存结果的redis_list中
:return:
'''
-
- openai.api_key = key_api
- res = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": mulu_prompt.format(title)},
- ],
- temperature=0.5
- )
-
- redis_.rpush(redis_key_name_openaikey_list, key_api)
- mulu = res.choices[0].message.content
- mulu_list = str(mulu).split("\n")
- mulu_list = [i.strip() for i in mulu_list if i != ""]
-
- print(mulu_list)
-
- cun_bool = False
- table_of_contents = [mulu_list[0]]
-
- for i in mulu_list[1:]:
- result_second_biaoti_list = re.findall(pantten_second_biaoti, i)
- result_other_biaoti_list = re.findall(pantten_other_biaoti, i)
- if result_second_biaoti_list != []:
- table_of_contents.append("@@" + i)
- cun_bool = True
- continue
- if cun_bool == False:
- continue
- else:
- if result_other_biaoti_list != []:
+ try:
+ response = request_api_chatgpt(api_key, config.mulu_prompt.format(title))
+ res = response.json()
+ mulu = res["choices"][0]["message"]["content"]
+
+ redis_.rpush(redis_key_name_openaikey_list, api_key)
+ redis_.persist(redis_key_name_openaikey_list)
+ except:
+ """
+ 发送警报
+ """
+ res_content = ""
+ redis_.hset(redis_key_name_openaikey_bad_dict, uuid, str(api_key,"mulu"))
+ mulu = ""
+
+ try:
+ mulu_list = str(mulu).split("\n")
+ mulu_list = [i.strip() for i in mulu_list if i != ""]
+
+ print(mulu_list)
+
+ cun_bool = False
+ table_of_contents = [mulu_list[0]]
+
+ for i in mulu_list[1:]:
+ result_second_biaoti_list = re.findall(config.pantten_second_biaoti, i)
+ result_other_biaoti_list = re.findall(config.pantten_other_biaoti, i)
+ if result_second_biaoti_list != []:
table_of_contents.append("@@" + i)
+ cun_bool = True
+ continue
+ if cun_bool == False:
+ continue
else:
- table_of_contents.append(i)
-
- print(table_of_contents)
- # table_of_contents = table_of_contents[:3] + table_of_contents[-1:]
- # print(table_of_contents)
-
- thanks_references_bool_table = table_of_contents[-3:]
-
- # thanks = "致谢"
- # references = "参考文献"
- if references in thanks_references_bool_table:
- table_of_contents.remove(references)
-
- if thanks in thanks_references_bool_table:
- table_of_contents.remove(thanks)
-
- # table_of_contents.append(thanks)
- # table_of_contents.append(references)
-
- # if thanks not in thanks_bool_table:
- # table_of_contents.insert(-1, "致谢")
- #
- # if thanks not in thanks_bool_table:
- # table_of_contents.insert(-1, "致谢")
-
- print(len(table_of_contents))
-
- small_task_list = []
- # api_key,
- # index,
- # title,
- # mulu,
- # subtitle,
- # prompt
- kaitibaogao_task = {
- "task_type": "kaitibaogao",
- "uuid": uuid,
- "main_parameter": [title]
- }
+ if result_other_biaoti_list != []:
+ table_of_contents.append("@@" + i)
+ else:
+ table_of_contents.append(i)
+
+ print(table_of_contents)
+ # table_of_contents = table_of_contents[:3] + table_of_contents[-1:]
+ # print(table_of_contents)
+
+ thanks_references_bool_table = table_of_contents[-3:]
+
+ # thanks = "致谢"
+ # references = "参考文献"
+ if references in thanks_references_bool_table:
+ table_of_contents.remove(references)
+
+ if thanks in thanks_references_bool_table:
+ table_of_contents.remove(thanks)
+
+ # table_of_contents.append(thanks)
+ # table_of_contents.append(references)
+
+ # if thanks not in thanks_bool_table:
+ # table_of_contents.insert(-1, "致谢")
+ #
+ # if thanks not in thanks_bool_table:
+ # table_of_contents.insert(-1, "致谢")
+
+ print(len(table_of_contents))
+
+ small_task_list = []
+ # api_key,
+ # index,
+ # title,
+ # mulu,
+ # subtitle,
+ # prompt
+ kaitibaogao_task = {
+ "task_type": "kaitibaogao",
+ "uuid": uuid,
+ "main_parameter": [title]
+ }
- chat_abstract_task = {
- "task_type": "chat_abstract",
- "uuid": uuid,
- "main_parameter": [title]
- }
- small_task_list.append(kaitibaogao_task)
- small_task_list.append(chat_abstract_task)
- content_index = 0
- while True:
- if content_index == len(table_of_contents):
- break
- subtitle = table_of_contents[content_index]
- if content_index == 0:
- prompt = first_title_prompt
- elif subtitle == "参考文献":
- prompt = references_prompt
- elif subtitle == "致谢":
- prompt = thank_prompt
- else:
- prompt = small_title_prompt
- print("请求的所有参数",
- content_index,
- title,
- subtitle,
- prompt)
-
- paper_content = {
- "task_type": "paper_content",
+ chat_abstract_task = {
+ "task_type": "chat_abstract",
+ "uuid": uuid,
+ "main_parameter": [title]
+ }
+ small_task_list.append(kaitibaogao_task)
+ small_task_list.append(chat_abstract_task)
+ content_index = 0
+ while True:
+ if content_index == len(table_of_contents):
+ break
+ subtitle = table_of_contents[content_index]
+ if content_index == 0:
+ prompt = config.first_title_prompt
+ elif subtitle == "参考文献":
+ prompt = config.references_prompt
+ elif subtitle == "致谢":
+ prompt = config.thank_prompt
+ else:
+ prompt = config.small_title_prompt
+ print("请求的所有参数",
+ content_index,
+ title,
+ subtitle,
+ prompt)
+
+ paper_content = {
+ "task_type": "paper_content",
+ "uuid": uuid,
+ "main_parameter": [
+ content_index,
+ title,
+ mulu,
+ subtitle,
+ prompt
+ ]
+ }
+
+ small_task_list.append(paper_content)
+ content_index += 1
+
+ thanks_task = {
+ "task_type": "thanks_task",
"uuid": uuid,
"main_parameter": [
- content_index,
title,
- mulu,
- subtitle,
- prompt
+ config.thank_prompt
]
}
- small_task_list.append(paper_content)
- content_index += 1
-
- thanks_task = {
- "task_type": "thanks_task",
- "uuid": uuid,
- "main_parameter": [
- title,
- thank_prompt
- ]
- }
-
- references_task = {
- "task_type": "references_task",
- "uuid": uuid,
- "main_parameter": [
+ references_task = {
+ "task_type": "references_task",
+ "uuid": uuid,
+ "main_parameter": [
title,
mulu,
- references_prompt
+ config.references_prompt
]
- }
+ }
- small_task_list.append(thanks_task)
- small_task_list.append(references_task)
-
- for small_task in small_task_list:
- small_task = json.dumps(small_task, ensure_ascii=False)
- redis_.rpush(redis_small_task, small_task)
-
- res = {
- "uuid": uuid,
- "num_small_task": len(small_task_list),
- "tasking_num": 0,
- "标题": title,
- "目录": mulu,
- "开题报告": "",
- "任务书": "",
- "中文摘要": "",
- "英文摘要": "",
- "中文关键词": "",
- "英文关键词": "",
- "正文": "",
- "致谢": "",
- "参考文献": "",
- "table_of_contents": [""] * len(table_of_contents)
- }
+ small_task_list.append(thanks_task)
+ small_task_list.append(references_task)
- res = json.dumps(res, ensure_ascii=False)
- redis_.hset(redis_res, uuid, res)
+ for small_task in small_task_list:
+ small_task = json.dumps(small_task, ensure_ascii=False)
+ redis_.rpush(redis_small_task, small_task)
+ redis_.persist(redis_key_name_openaikey_list)
+
+ res = {
+ "uuid": uuid,
+ "num_small_task": len(small_task_list),
+ "tasking_num": 0,
+ "标题": title,
+ "目录": mulu,
+ "开题报告": "",
+ "任务书": "",
+ "中文摘要": "",
+ "英文摘要": "",
+ "中文关键词": "",
+ "英文关键词": "",
+ "正文": "",
+ "致谢": "",
+ "参考文献": "",
+ "table_of_contents": [""] * len(table_of_contents)
+ }
+
+ res = json.dumps(res, ensure_ascii=False)
+ redis_.hset(redis_res, uuid, res)
+ except:
+ print("目录程序错误")
def threading_1():
@@ -542,32 +532,32 @@ def threading_2():
if task_type == "kaitibaogao":
t = Thread(target=chat_kaitibaogao, args=(api_key,
uuid,
- main_parameter
- ))
+ main_parameter,
+ task_type))
t.start()
elif task_type == "chat_abstract":
t = Thread(target=chat_abstract_keyword, args=(api_key,
uuid,
- main_parameter
- ))
+ main_parameter,
+ task_type))
t.start()
elif task_type == "paper_content":
t = Thread(target=chat_content, args=(api_key,
uuid,
- main_parameter
- ))
+ main_parameter,
+ task_type))
t.start()
elif task_type == "thanks_task":
t = Thread(target=chat_thanks, args=(api_key,
- uuid,
- main_parameter
- ))
+ uuid,
+ main_parameter,
+ task_type))
t.start()
elif task_type == "references_task":
t = Thread(target=chat_references, args=(api_key,
- uuid,
- main_parameter
- ))
+ uuid,
+ main_parameter,
+ task_type))
t.start()
else:
time.sleep(1)
@@ -584,36 +574,36 @@ def threading_3():
# "tasking_num": 0,
if int(values_dict["num_small_task"]) == int(values_dict["tasking_num"]):
res_end_list.append(key)
- for key in res_end_list:
- redis_.hdel(redis_res, key)
-
- res_str = res_dict[key].decode("utf-8")
- json_str = json.dumps(res_str, indent=4, ensure_ascii=False)
-
- key = str(key, encoding="utf-8")
- uuid_path = os.path.join(project_data_txt_path, key)
-
- os.makedirs(uuid_path)
-
- paper_content_path = os.path.join(uuid_path, "paper_content.json")
- with open(paper_content_path, 'w') as json_file:
- json_file.write(json_str)
-
- """
- 调用jar包
- 占位
-
-
- """
- url_path_paper = "http://104.244.90.248:14000/download?filename_path={}/paper.docx".format(key)
- url_path_kaiti = "http://104.244.90.248:14000/download?filename_path={}/paper_start.docx".format(key)
- return_text = str({"id": key,
- "content_url_path": url_path_paper,
- "content_report_url_path": url_path_kaiti,
- "probabilities": None,
- "status_code": 200})
- redis_.srem(redis_title_ing, key)
- redis_.set(key, return_text, 28800)
+ if res_end_list != []:
+ for key in res_end_list:
+ redis_.hdel(redis_res, key)
+
+ res_str = res_dict[key].decode("utf-8")
+ json_str = json.dumps(res_str, indent=4, ensure_ascii=False)
+
+ key = str(key, encoding="utf-8")
+ uuid_path = os.path.join(config.project_data_txt_path, key)
+
+ os.makedirs(uuid_path)
+
+ paper_content_path = os.path.join(uuid_path, "paper_content.json")
+ with open(paper_content_path, 'w') as json_file:
+ json_file.write(json_str)
+
+ """
+ 调用jar包
+ 占位
+
+ """
+ url_path_paper = paper_download_url.format(key)
+ url_path_kaiti = paper_start_download_url.format(key)
+ return_text = str({"id": key,
+ "content_url_path": url_path_paper,
+ "content_report_url_path": url_path_kaiti,
+ "probabilities": None,
+ "status_code": 200})
+ redis_.srem(redis_title_ing, key)
+ redis_.set(key, return_text, 28800)
time.sleep(1)
@@ -627,15 +617,15 @@ def threading_3():
# redis_.rpush(redis_title, json.dumps({"id": id_, "title": title})) # 加入redis
-
@app.route("/chat", methods=["POST"])
def chat():
print(request.remote_addr)
title = request.json["title"]
id_ = str(uuid.uuid1())
print(id_)
- redis_.rpush(redis_title, json.dumps({"id":id_, "title": title})) # 加入redis
- return_text = {"texts": {'id': id_,}, "probabilities": None, "status_code": 200}
+ redis_.rpush(redis_title, json.dumps({"id": id_, "title": title},ensure_ascii=False)) # 加入redis
+ redis_.persist(redis_key_name_openaikey_list)
+ return_text = {"texts": {'id': id_, }, "probabilities": None, "status_code": 200}
print("ok")
redis_.sadd(redis_title_ing, id_)
@@ -653,12 +643,11 @@ def download_file():
# response.headers["Content-Disposition"] = "attachment; filename={}".format(filename.encode().decode('latin-1'))
filename_path = request.args.get('filename_path', '')
filename = filename_path.split("/")[1]
- path_name = os.path.join(project_data_txt_path, filename_path)
+ path_name = os.path.join(config.project_data_txt_path, filename_path)
with open(path_name, 'rb') as f:
stream = f.read()
response = Response(stream, content_type='application/octet-stream')
response.headers['Content-disposition'] = 'attachment; filename={}'.format(filename)
-
return response
@@ -669,19 +658,22 @@ def search():
if result is not None:
# redis_.delete(id_)
# result_dict = result.decode('UTF-8')
+ if redis_.hexists(redis_key_name_openaikey_bad_dict, id_) == True:
+ result_text = {'code': "204", 'text': "", 'probabilities': None}
- result_dict = eval(result)
- # return_text = {"id":query_id, "load_result_path": load_result_path, "probabilities": None, "status_code": 200}
- query_id = result_dict["id"]
- # "content_url_path": url_path_paper,
- # "content_report_url_path": url_path_kaiti,
- content_url_path = result_dict["content_url_path"]
- content_report_url_path = result_dict["content_report_url_path"]
- probabilities = result_dict["probabilities"]
- result_text = {'code': 200,
- 'content_url_path': content_url_path,
- 'content_report_url_path': content_report_url_path,
- 'probabilities': probabilities}
+ else:
+ result_dict = eval(result)
+ # return_text = {"id":query_id, "load_result_path": load_result_path, "probabilities": None, "status_code": 200}
+ query_id = result_dict["id"]
+ # "content_url_path": url_path_paper,
+ # "content_report_url_path": url_path_kaiti,
+ content_url_path = result_dict["content_url_path"]
+ content_report_url_path = result_dict["content_report_url_path"]
+ probabilities = result_dict["probabilities"]
+ result_text = {'code': 200,
+ 'content_url_path': content_url_path,
+ 'content_report_url_path': content_report_url_path,
+ 'probabilities': probabilities}
else:
querying_list = list(redis_.smembers(redis_title_ing))
querying_set = set()
@@ -722,7 +714,6 @@ t.start()
t = Thread(target=threading_3)
t.start()
-
if __name__ == '__main__':
# main("大型商业建筑人员疏散设计研究")
app.run(host="0.0.0.0", port=14002, threaded=True, debug=False)
\ No newline at end of file
diff --git a/json_模板.json b/json_模板.json
new file mode 100644
index 0000000..670d67d
--- /dev/null
+++ b/json_模板.json
@@ -0,0 +1,11 @@
+{
+ "标题":"",
+ "目录":"",
+ "中文摘要":"",
+ "英文摘要":"",
+ "正文": ["一、绪论",
+ "1.1 研究背景\n在大型商业建筑人员疏散管理中,建立科学的管理制度是非常必要的。1.2 研究背景\n在大型商业建筑人员疏散管理中,建立科学的管理制度是非常必要的。",
+ "二、机械手臂的设计与分析",
+ "二、机械手臂的设计与分析"
+ ]
+}
\ No newline at end of file
diff --git a/lock_flask.py b/lock_flask.py
index 8d99d9a..45e4644 100644
--- a/lock_flask.py
+++ b/lock_flask.py
@@ -8,6 +8,12 @@
@Describe:
"""
+## TODO 输入列表问题
+## TODO 服务停止在启动redis回滚问题
+## TODO 多线程问题
+## TODO ip地址问题
+## TODO 请求国内地址
+
import os
from flask import Flask, jsonify, Response
from flask import request
@@ -26,7 +32,7 @@ import re
import urllib.parse as pa
-pool = redis.ConnectionPool(host='localhost', port=6379, max_connections=50, db=2)
+pool = redis.ConnectionPool(host='localhost', port=6379, max_connections=50, db=1)
redis_ = redis.Redis(connection_pool=pool, decode_responses=True)
db_key_query = 'query'
@@ -40,12 +46,16 @@ import logging
lock = threading.RLock()
mulu_prompt = "请帮我根据题目为“{}”生成一个论文目录"
-first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容续写完整,保证续写内容不少于1000字"
-small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容续写完整,保证续写内容不少于1000字"
-references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成中文的{}"
+first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容续写完整,保证续写内容不少于800字"
+small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容续写完整,保证续写内容不少于800字"
+references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成15篇中文的{},要求其中有有中文参考文献不低于12篇,英文参考文献不低于2篇"
thank_prompt = "论文题目是“{}”,目录是“{}”,请把其中的{}部分续写完整"
kaitibaogao_prompt = "请以《{}》为题目生成研究的主要的内容、背景、目的、意义,要求不少于1500字"
-
+chinese_abstract_prompt = "请以《{}》为题目生成论文摘要,要求不少于500字"
+english_abstract_prompt = "请把“{}”这段文字翻译成英文"
+chinese_keyword_prompt = "请为“{}”这段论文摘要生成3-5个关键字"
+english_keyword_prompt = "请把“{}”这几个关键字翻译成英文"
+thanks = "致谢"
dabiaoti = ["二","三","四","五","六","七","八","九"]
# 正则
@@ -57,7 +67,14 @@ project_data_txt_path = "/home/majiahui/ChatGPT_Sever/new_data_txt"
api_key_list = ["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",
"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",
"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",
- "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"]
+ "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",
+ "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",
+ "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",
+ "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",
+ "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",
+ "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"]
+
+# "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"
def chat_title(title, api_key):
global lock
@@ -82,6 +99,7 @@ def chat_title(title, api_key):
def chat_kaitibaogao(title, api_key, uuid_path):
global lock
+ # time.sleep(1)
openai.api_key = api_key
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
@@ -99,6 +117,66 @@ def chat_kaitibaogao(title, api_key, uuid_path):
lock.release()
+def chat_abstract_keyword(title, api_key, uuid_path):
+ global lock
+
+ # time.sleep(7)
+ openai.api_key = api_key
+ # 生成中文摘要
+ res = openai.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "user", "content": chinese_abstract_prompt.format(title)},
+ ],
+ temperature=0.5
+ )
+ chinese_abstract = res.choices[0].message.content
+ # 生成英文的摘要
+ res = openai.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "user", "content": english_abstract_prompt.format(chinese_abstract)},
+ ],
+ temperature=0.5
+ )
+ english_abstract = res.choices[0].message.content
+ # 生成中文关键字
+ res = openai.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "user", "content": chinese_keyword_prompt.format(chinese_abstract)},
+ ],
+ temperature=0.5
+ )
+ chinese_keyword = res.choices[0].message.content
+ # 生成英文关键字
+ res = openai.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "user", "content": english_keyword_prompt.format(chinese_keyword)},
+ ],
+ temperature=0.5
+ )
+
+ english_keyword = res.choices[0].message.content
+
+ paper_abstract_keyword = {
+ "中文摘要": chinese_abstract,
+ "英文摘要": english_abstract,
+ "中文关键字": chinese_keyword,
+ "英文关键字": english_keyword
+ }
+
+ json_str = json.dumps(paper_abstract_keyword, indent=4, ensure_ascii=False)
+ abstract_keyword_path = os.path.join(uuid_path, "abstract_keyword.json")
+ with open(abstract_keyword_path, 'w') as json_file:
+ json_file.write(json_str)
+
+ lock.acquire()
+ api_key_list.append(api_key)
+ lock.release()
+
+
class GeneratePaper:
def __init__(self, mulu, table):
self.mulu = mulu
@@ -107,8 +185,7 @@ class GeneratePaper:
def chat_content_(self,api_key, mulu_title_id, title, mulu, subtitle, prompt):
global lock
# time.sleep(5)
- # api_key_list.append(api_key)
- # self.paper[mulu_title_id] = subtitle
+ self.paper[mulu_title_id] = subtitle
if subtitle[:2] == "@@":
self.paper[mulu_title_id] = subtitle[2:]
else:
@@ -148,7 +225,10 @@ def classify(): # 调用模型,设置最大batch_size
# 生成开题报告
# title, api_key, uuid_path
+
+ lock.acquire()
api_key = api_key_list.pop()
+ lock.release()
t = Thread(target=chat_kaitibaogao, args=(title,
api_key,
uuid_path,
@@ -156,6 +236,17 @@ def classify(): # 调用模型,设置最大batch_size
t.start()
thread_list.append(t)
+ # 生成中英文摘要
+ lock.acquire()
+ api_key = api_key_list.pop()
+ lock.release()
+ t = Thread(target=chat_abstract_keyword, args=(title,
+ api_key,
+ uuid_path,
+ ))
+ t.start()
+ thread_list.append(t)
+
# 生成目录
while True:
if api_key_list != []:
@@ -198,15 +289,25 @@ def classify(): # 调用模型,设置最大batch_size
print(table_of_contents)
# table_of_contents = table_of_contents[:3] + table_of_contents[-1:]
# print(table_of_contents)
+
+
+ thanks_bool_table = table_of_contents[-3:]
+ if thanks not in thanks_bool_table:
+ table_of_contents.insert(-1, "致谢")
+
chat_class = GeneratePaper(mulu_list, table_of_contents)
print(len(table_of_contents))
+
+
############################################################
while True:
if api_key_list == []:
continue
if index == len(table_of_contents):
break
+ lock.acquire()
api_key = api_key_list.pop()
+ lock.release()
subtitle = table_of_contents[index]
if index == 0:
prompt = first_title_prompt
@@ -215,18 +316,17 @@ def classify(): # 调用模型,设置最大batch_size
elif subtitle == "致谢":
prompt = thank_prompt
else:
- prompt = first_title_prompt
+ prompt = small_title_prompt
print("请求的所有参数", api_key,
index,
title,
- mulu_list,
subtitle,
prompt)
t = Thread(target=chat_class.chat_content_, args=(api_key,
index,
title,
- mulu_list,
+ mulu,
subtitle,
prompt))
t.start()
@@ -239,20 +339,57 @@ def classify(): # 调用模型,设置最大batch_size
thread.join()
- print(chat_class.paper)
- paper = "\n".join(chat_class.paper)
- print(paper)
+ paper_content_list = chat_class.paper
+ # paper = "\n".join(chat_class.paper)
+ # print(paper)
+
+ # 不要txt,修改为json
+ #判断 摘要是否生成完成
+ abstract_keyword_path = os.path.join(uuid_path, "abstract_keyword.json")
+ while True:
+ print(abstract_keyword_path)
+ print(os.path.exists(abstract_keyword_path))
+ if os.path.exists(abstract_keyword_path) == True:
+ break
+ time.sleep(3)
+
+ with open(abstract_keyword_path, "r", encoding="utf-8") as f:
+ abstract_keyword_dict = json.load(f)
- content_path = os.path.join(uuid_path, "content.txt")
- with open(content_path, 'w', encoding='utf8') as f_content:
- f_content.write(paper)
+ # 开题报告
+ kaitibaogao_path = os.path.join(uuid_path, "kaitibaogao.txt")
+ while True:
+ print(kaitibaogao_path)
+ print(os.path.exists(kaitibaogao_path))
+ if os.path.exists(kaitibaogao_path) == True:
+ break
+ time.sleep(3)
+ with open(kaitibaogao_path, "r", encoding="utf-8") as f:
+ kaitibaogao = f.read()
+
+ print("文件路径检测完成")
+
+ paper_dict = {}
+ for i in abstract_keyword_dict:
+ paper_dict[i] = abstract_keyword_dict[i]
+
+ paper_dict["正文"] = paper_content_list
+ paper_dict["目录"] = mulu
+ paper_dict["开题报告"] = kaitibaogao
+
+ json_str = json.dumps(paper_dict, indent=4, ensure_ascii=False)
+ paper_content_path = os.path.join(uuid_path, "paper_content.json")
+    with open(paper_content_path, 'w', encoding='utf-8') as json_file:
+ json_file.write(json_str)
+
+ # content_path = os.path.join(uuid_path, "content.txt")
+ # with open(content_path, 'w', encoding='utf8') as f_content:
+ # f_content.write(paper_dict)
mulu_path = os.path.join(uuid_path, "mulu.txt")
with open(mulu_path, 'w', encoding='utf8') as f_mulu:
f_mulu.write(mulu)
- kaitibaogao_txt_path = os.path.join(uuid_path, "kaitibaogao.txt")
-
# word保存路径
save_word_paper = os.path.join(uuid_path, "paper.docx")
@@ -264,19 +401,19 @@ def classify(): # 调用模型,设置最大batch_size
# f2.write(content_base64)
# 拼接成word
- title = pa.quote(title)
- mulu_path = mulu_path
- content_path = content_path
-
- # 调用jar包
- print("java_path", mulu_path, content_path, title, save_word_paper)
- os.system(
- "java -Dfile.encoding=UTF-8 -jar '/home/majiahui/ChatGPT_Sever/createAiXieZuoWord.jar' '{}' '{}' '{}' '{}'".format(
- mulu_path, content_path, title, save_word_paper))
-
- print("jaba_kaitibaogao", kaitibaogao_txt_path, save_word_paper_start)
- os.system("java -Dfile.encoding=UTF-8 -jar '/home/majiahui/ChatGPT_Sever/createAiXieZuoKaitiWord.jar' '{}' '{}'".format(
- kaitibaogao_txt_path, save_word_paper_start))
+ # title = pa.quote(title)
+ # mulu_path = mulu_path
+ # content_path = content_path
+ #
+ # # 调用jar包
+ # print("java_path", mulu_path, content_path, title, save_word_paper)
+ # os.system(
+ # "java -Dfile.encoding=UTF-8 -jar '/home/majiahui/ChatGPT_Sever/createAiXieZuoWord.jar' '{}' '{}' '{}' '{}'".format(
+ # mulu_path, content_path, title, save_word_paper))
+ #
+ # print("jaba_kaitibaogao", kaitibaogao_txt_path, save_word_paper_start)
+ # os.system("java -Dfile.encoding=UTF-8 -jar '/home/majiahui/ChatGPT_Sever/createAiXieZuoKaitiWord.jar' '{}' '{}'".format(
+ # kaitibaogao_txt_path, save_word_paper_start))
url_path_paper = "http://104.244.90.248:14000/download?filename_path={}/paper.docx".format(query_id)
url_path_kaiti = "http://104.244.90.248:14000/download?filename_path={}/paper_start.docx".format(query_id)
@@ -382,4 +519,4 @@ t1 = Thread(target=classify)
t1.start()
if __name__ == "__main__":
- app.run(host="0.0.0.0", port=14002, threaded=True, debug=False)
\ No newline at end of file
+ app.run(host="0.0.0.0", port=14000, threaded=True, debug=False)
\ No newline at end of file
diff --git a/serve_config_1.py b/serve_config_1.py
new file mode 100644
index 0000000..98ed15f
--- /dev/null
+++ b/serve_config_1.py
@@ -0,0 +1,45 @@
+
+
+class Config:
+ def __init__(self):
+
+ # 目录提取拼接相关参数
+        self.pantten_second_biaoti = r'[2二ⅡⅠ][、.]\s{0,}?[\u4e00-\u9fa5]+'
+        self.pantten_other_biaoti = r'[2-9二三四五六七八九ⅡⅢⅣⅤⅥⅦⅧⅨ][、.]\s{0,}?[\u4e00-\u9fa5]+'
+
+ # chatgpt 接口相关参数
+ self.mulu_prompt = "请帮我根据题目为“{}”生成一个论文目录"
+ self.first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容补充完整,补充内容字数在100字左右"
+ self.small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容补充完整,补充内容字数在100字左右"
+ self.references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成15篇左右的参考文献,要求其中有有中文参考文献不低于12篇,英文参考文献不低于2篇"
+ self.thank_prompt = "请以“{}”为题写一篇论文的致谢"
+ self.kaitibaogao_prompt = "请以《{}》为题目生成研究的主要的内容、背景、目的、意义,要求不少于100字"
+ self.chinese_abstract_prompt = "请以《{}》为题目生成论文摘要,要求生成的字数在100字左右"
+ self.english_abstract_prompt = "请把“{}”这段文字翻译成英文"
+ self.chinese_keyword_prompt = "请为“{}”这段论文摘要生成3-5个关键字"
+ self.english_keyword_prompt = "请把“{}”这几个关键字翻译成英文"
+ self.dabiaoti = ["二", "三", "四", "五", "六", "七", "八", "九"]
+ self.project_data_txt_path = "/home/majiahui/ChatGPT_Sever/new_data_txt"
+ self.openaikey_list = ["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",
+ "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",
+ "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"
+ ]
+ # "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",
+ # "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"
+ # "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",
+ # "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",
+ # "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",
+ # "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"
+
+ # 流程相关参数
+ self.thanks = "致谢"
+ self.references = "参考文献"
+
+ # flask port
+ self.flask_port = "14000"
+
+ # redis config
+ self.reids_ip = 'localhost'
+ self.reids_port = 63179
+ self.reids_db = 2
+ self.reids_password='Zhicheng123*'
\ No newline at end of file
diff --git a/serve_config_2.py b/serve_config_2.py
new file mode 100644
index 0000000..d7aa329
--- /dev/null
+++ b/serve_config_2.py
@@ -0,0 +1,43 @@
+
+
+class Config:
+ def __init__(self):
+
+ # 目录提取拼接相关参数
+        self.pantten_second_biaoti = r'[2二ⅡⅠ][、.]\s{0,}?[\u4e00-\u9fa5]+'
+        self.pantten_other_biaoti = r'[2-9二三四五六七八九ⅡⅢⅣⅤⅥⅦⅧⅨ][、.]\s{0,}?[\u4e00-\u9fa5]+'
+
+ # chatgpt 接口相关参数
+ self.mulu_prompt = "请帮我根据题目为“{}”生成一个论文目录"
+ self.first_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的大标题“{}”的内容补充完整,补充内容字数在100字左右"
+ self.small_title_prompt = "论文题目是“{}”,目录是“{}”,请把其中的小标题“{}”的内容补充完整,补充内容字数在100字左右"
+ self.references_prompt = "论文题目是“{}”,目录是“{}”,请为这篇论文生成15篇左右的参考文献,要求其中有有中文参考文献不低于12篇,英文参考文献不低于2篇"
+ self.thank_prompt = "请以“{}”为题写一篇论文的致谢"
+ self.kaitibaogao_prompt = "请以《{}》为题目生成研究的主要的内容、背景、目的、意义,要求不少于100字"
+ self.chinese_abstract_prompt = "请以《{}》为题目生成论文摘要,要求生成的字数在100字左右"
+ self.english_abstract_prompt = "请把“{}”这段文字翻译成英文"
+ self.chinese_keyword_prompt = "请为“{}”这段论文摘要生成3-5个关键字"
+ self.english_keyword_prompt = "请把“{}”这几个关键字翻译成英文"
+ self.dabiaoti = ["二", "三", "四", "五", "六", "七", "八", "九"]
+ self.project_data_txt_path = "/home/majiahui/ChatGPT_Sever/new_data_txt"
+ self.openaikey_list = [
+ "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",
+ "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",
+ "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"
+ ]
+ # "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",
+ # "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",
+ # "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"
+
+ # 流程相关参数
+ self.thanks = "致谢"
+ self.references = "参考文献"
+
+ # flask port
+ self.flask_port = "14000"
+
+ # redis config
+ self.reids_ip = '104.244.89.190'
+ self.reids_port = 63179
+ self.reids_db = 2
+ self.reids_password='Zhicheng123*'
\ No newline at end of file
diff --git a/查询uuid.py b/查询uuid.py
index ce9d614..518b062 100644
--- a/查询uuid.py
+++ b/查询uuid.py
@@ -1,13 +1,27 @@
import requests
import time
-data = {"id": "58abde1c-d1ef-11ed-a2cd-aaaa001aad2e"}
+data = {"id": "a259c76a-d521-11ed-b23c-aaaa001b4bbf"}
start = time.time()
-res = requests.post('http://104.244.90.248:14000/search', json=data)
+res = requests.post('http://104.244.89.190:14002/search', json=data)
end = time.time()
print(end - start)
-print(res.text)
\ No newline at end of file
+print(res.text)
+
+'''
+da823db0-d50e-11ed-a38c-aaaa001b4bbf
+e8bb13f2-d50e-11ed-b37a-aaaa001b4bbf
+f3e2c216-d50e-11ed-9869-aaaa001b4bbf
+268f0b52-d50f-11ed-b741-aaaa001b4bbf
+'''
+
+
+'''a9880f98-d516-11ed-b1f9-aaaa001b4bbf
+b689132c-d516-11ed-b4e7-aaaa001b4bbf
+c3f4bea8-d516-11ed-beda-aaaa001b4bbf
+cd948cb8-d516-11ed-9847-aaaa001b4bbf
+f2c5920c-d516-11ed-96b1-aaaa001b4bbf'''
diff --git a/测试chatgpt调用接口.py b/测试chatgpt调用接口.py
index afbd2a1..8506282 100644
--- a/测试chatgpt调用接口.py
+++ b/测试chatgpt调用接口.py
@@ -7,11 +7,13 @@
@Software:
@Describe:
"""
+import time
+
import openai
import flask
-def chat_drop():
- openai.api_key = "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"
+def chat_drop(api_key):
+ openai.api_key = api_key
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
@@ -23,5 +25,19 @@ def chat_drop():
top_p=1,
)
print(res.choices[0].message.content)
+ time.sleep(3)
+
+openaikey_list = ["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",
+ "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",
+ "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",
+ "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",
+ "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",
+ "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",
+ "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",
+ "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",
+ "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"]
+
+for i in openaikey_list:
+ chat_drop(i)
-chat_drop()
\ No newline at end of file
+chat_drop("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")
\ No newline at end of file
diff --git a/测试flask多进程.py b/测试flask多进程.py
new file mode 100644
index 0000000..e56488a
--- /dev/null
+++ b/测试flask多进程.py
@@ -0,0 +1,13 @@
+from flask import Flask, request
+
+app = Flask(__name__)
+
+
+@app.route("/")
+def index():
+ filename_path = request.args.get('filename_path', '')
+ return "Hello world!"
+
+
+if __name__ == "__main__":
+ app.run('0.0.0.0', port=11000, debug=True)
\ No newline at end of file
diff --git a/测试多进程.py b/测试多进程.py
index 0c0450c..a3e235b 100644
--- a/测试多进程.py
+++ b/测试多进程.py
@@ -1,6 +1,6 @@
import threading
-num = 0
+
lock = threading.Lock()
@@ -18,6 +18,7 @@ def sub():
lock.release()
for i in range(1000):
+ num = 0
t1 = threading.Thread(target=add, )
t2 = threading.Thread(target=sub, )
t1.start()
diff --git a/测试生成uuid.py b/测试生成uuid.py
index 8701fbd..f7e7d79 100644
--- a/测试生成uuid.py
+++ b/测试生成uuid.py
@@ -5,7 +5,7 @@ data = {"title": "大型商业建筑人员疏散设计研究"}
start = time.time()
-res = requests.post('http://104.244.90.248:14000/chat', json=data)
+res = requests.post('http://104.244.89.190:14002/chat', json=data)
end = time.time()
print(end - start)
diff --git a/简单的flask.py b/简单的flask.py
index 90c8dff..3609bec 100644
--- a/简单的flask.py
+++ b/简单的flask.py
@@ -3,7 +3,7 @@ from flask import Flask
app = Flask(__name__)
-@app.route("/aa")
+@app.route("/")
def index():
return "Hello world!"