Browse Source

普通版降重优化:使用vllm模型,解决英文问题,修复空字符bug

master
majiahui@haimaqingfan.com 10 months ago
parent
commit
594da0a4a4
  1. 84
      flask_drop_rewrite_request.py
  2. 77
      flask_predict_no_batch_t5.py
  3. 2
      run_app_nohub_search_redis.sh

84
flask_drop_rewrite_request.py

@ -137,7 +137,6 @@ def dialog_line_parse(url, text):
print("{}】 Failed to get a proper response from remote "
"server. Status Code: {}. Response: {}"
"".format(url, response.status_code, response.text))
print(text)
return {}
@ -274,17 +273,19 @@ def uuid_search_mp(results):
results_list[i] = result["text"]
else:
results_list[i] = "Empty character"
print(results_list)
time.sleep(3)
return results_list
def get_multiple_urls(urls):
def get_multiple_urls(text_info):
input_values = []
input_index = []
for i in urls:
input_values.append(i[1])
for i in range(len(text_info)):
if text_info[i][3] == True:
input_values.append(text_info[i][4])
input_index.append(i)
with concurrent.futures.ThreadPoolExecutor(100) as executor:
# 使用map方法并发地调用worker_function
results_1 = list(executor.map(request_api_chatgpt, input_values))
@ -293,15 +294,24 @@ def get_multiple_urls(urls):
# 使用map方法并发地调用worker_function
results = list(executor.map(uuid_search_mp, [results_1]))
return_list = []
for i,j in zip(urls, results[0]):
return_list.append([i, j])
return return_list
# return_list = []
# for i,j in zip(urls, results[0]):
# return_list.append([i, j])
return_dict = {}
for i, j in zip(input_index, results[0]):
return_dict[i] = j
for i in range(len(text_info)):
if i in return_dict:
text_info[i].append(return_dict[i])
else:
text_info[i].append(text_info[i][0])
return text_info
def chulipangban_test_1(snetence_id, text):
# 引号处理
text = text.strip()
dialogs_text, dialogs_index, other_index = get_dialogs_index(text)
for dialogs_text_dan in dialogs_text:
@ -454,7 +464,7 @@ def predict_data_post_processing(text_list):
# # text_list.extend(i)
# # return_list = predict_data_post_processing(text_list)
# # return return_list
def post_sentence_ulit(sentence, text_info):
def post_sentence_ulit(text_info):
'''
后处理
:param sentence:
@ -467,7 +477,7 @@ def post_sentence_ulit(sentence, text_info):
if_change = text_info[3]
if if_change == True:
sentence = sentence.strip()
sentence = text_info[-1].strip()
if "改写后:" in sentence:
sentence_lable_index = sentence.index("改写后:")
sentence = sentence[sentence_lable_index + 4:]
@ -483,7 +493,7 @@ def post_sentence_ulit(sentence, text_info):
# sentence = sentence[:-1] + text_info[0][-1]
else:
sentence = text_info[0]
return sentence
return text_info[:4] + [sentence]
def has_chinese(s):
return bool(re.search('[\u4e00-\u9fa5]', s))
@ -491,16 +501,19 @@ def has_chinese(s):
def english_ulit(sentence):
sentence = str(sentence).strip()
if_change = False
if_change = True
# 判断句子长度
print("sen", sentence)
if sentence[-1] != ".":
text = f"<|im_start|>user\nTask:Rewrite a sentence\nRewrite the following sentence fragment, ensuring that the meaning remains similar but with significant changes. The length of the rewritten sentence must be greater, not less. Additionally, the words in the short sentences must connect seamlessly with the preceding and following sentences:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
if "" != "":
if sentence[-1] != ".":
text = f"<|im_start|>user\nRewrite the following sentence fragment, ensuring that the meaning remains similar but with significant changes. The length of the rewritten sentence must be greater, not less. Additionally, the words in the short sentences must connect seamlessly with the preceding and following sentences:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
else:
text = f"<|im_start|>user\nRewrite the following sentence, requiring the meaning to be similar but the change to be larger, and the number of words can only be more but not less:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
else:
text = f"<|im_start|>user\nTask:Rewrite a sentence\nRewrite the following sentence, requiring the meaning to be similar but the change to be larger, and the number of words can only be more but not less:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
# text = f"<|im_start|>user\n任务:改写句子\n改写下面这句话,要求不改变原句语义,短句之间衔接不能有误:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
if_change = False
text = f"<|im_start|>user\nHello\n<|im_end|>\n<|im_start|>assistant\n"
return text, if_change
@ -517,7 +530,6 @@ def chinese_ulit(sentence):
text = f"<|im_start|>user\n任务:改写句子\n改写下面半这句话,要求意思接近但是改动幅度比较大,字数只能多不能少,短句前后词跟上下句衔接不能有错误:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
else:
text = f"<|im_start|>user\n任务:改写句子\n改写下面这句话,要求意思接近但是改动幅度比较大,字数只能多不能少:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
# text = f"<|im_start|>user\n任务:改写句子\n改写下面这句话,要求不改变原句语义,短句之间衔接不能有误:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
else:
text = f"<|im_start|>user\n下面句子不做任何变化:\n{sentence}\n<|im_end|>\n<|im_start|>assistant\n"
@ -573,9 +585,7 @@ def main(texts: dict):
for i in text_list:
print("sen", i[0])
text, if_change = pre_sentence_ulit(i[0])
text_sentence.append(text)
text_info.append([i[0], i[1], i[2], if_change])
text_info.append([i[0], i[1], i[2], if_change, text])
# outputs = llm.generate(text_sentence, sampling_params) # 调用模型
#
@ -623,30 +633,14 @@ def main(texts: dict):
# [4, 'http://114.116.25.228:12000/predict', {'texts': '任务:改写句子\n改写下面这句话,要求意思接近但是改动幅度比较大,字数只能多不能少:\n一是新时代“枫桥经验”对'}]
# ]
input_data = []
for i in range(len(text_sentence)):
# input_data.append([i, chatgpt_url, {"texts": text_sentence[i]}])
input_data.append([i, text_sentence[i]])
text_info = get_multiple_urls(text_info)
results = get_multiple_urls(input_data)
generated_text_list = [""] * len(input_data)
for url, result in results:
# print(f"Result for {url}: {result}")
if result != "":
generated_text_list[url[0]] = result
else:
generated_text_list[url[0]] = "Empty character"
for i in range(len(generated_text_list)):
# if len(text_list[i][0]) > 7:
# generated_text_list[i] = post_sentence_ulit(generated_text_list[i])
# else:
# generated_text_list[i] = text_list[i][0]
generated_text_list[i] = post_sentence_ulit(generated_text_list[i], text_info[i])
for i in range(len(text_info)):
text_info[i] = post_sentence_ulit(text_info[i])
for i, j in zip(generated_text_list, text_info):
text_list_new.append([i] + j[1:3])
for i in range(len(text_info)):
text_list_new.append([text_info[i][-1]] + text_info[i][1:3])
return_list = predict_data_post_processing(text_list_new)
return return_list
@ -744,7 +738,7 @@ def handle_query():
return jsonify(return_text)
if isinstance(texts, dict):
id_ = str(uuid.uuid1()) # 为query生成唯一标识
print("uuid: ", uuid)
print("uuid: ", id_)
d = {'id': id_, 'text': texts, "text_type": text_type} # 绑定文本和query id
load_request_path = './request_data_logs/{}.json'.format(id_)

77
flask_predict_no_batch_t5.py

@ -30,6 +30,7 @@ redis_ = redis.Redis(connection_pool=pool, decode_responses=True)
db_key_query = 'query'
db_key_querying = 'querying'
db_key_queryset = 'queryset'
batch_size = 32
app = Flask(__name__)
@ -51,33 +52,6 @@ encoder, decoder, model, tokenizer = generatemodel.device_setup()
autotitle = AutoTitle(encoder, decoder, model, tokenizer, start_id=0, end_id=tokenizer._token_end_id, maxlen=120)
def smtp_f(name):
    """Send an alert email via QQ SMTP over SSL, using *name* as the subject.

    Used as a failure notifier: the body is a fixed Chinese message meaning
    "ordinary-edition plagiarism-reduction project errored, urgent".
    Sends from and to the same hard-coded QQ mailbox. Any SMTP failure is
    caught and only reported on stdout; nothing is raised or returned.
    """
    # Imports are local so the module loads even where email support is unused.
    import smtplib
    from email.mime.text import MIMEText
    from email.header import Header
    sender = '838878981@qq.com'  # sending mailbox
    receivers = ['838878981@qq.com']  # receiving mailbox(es)
    auth_code = "jfqtutaiwrtdbcge"  # SMTP authorization code — NOTE(review): hard-coded credential
    message = MIMEText('普通版降重项目出错,紧急', 'plain', 'utf-8')
    message['From'] = Header("Sender<%s>" % sender)  # sender header
    message['To'] = Header("Receiver<%s>" % receivers[0])  # recipient header
    subject = name
    message['Subject'] = Header(subject, 'utf-8')
    try:
        server = smtplib.SMTP_SSL('smtp.qq.com', 465)
        server.login(sender, auth_code)
        server.sendmail(sender, receivers, message.as_string())
        print("邮件发送成功")
        server.close()
    except smtplib.SMTPException:
        # Best-effort notification: swallow SMTP errors, just log to stdout.
        print("Error: 无法发送邮件")
class log:
def __init__(self):
pass
@ -367,32 +341,29 @@ def classify(): # 调用模型,设置最大batch_size
@app.route("/predict", methods=["POST"])
def handle_query():
try:
print(request.remote_addr)
texts = request.json["texts"]
text_type = request.json["text_type"]
if texts is None:
return_text = {"texts": "输入了空值", "probabilities": None, "status_code": 402}
return jsonify(return_text)
if isinstance(texts, dict):
id_ = str(uuid.uuid1()) # 为query生成唯一标识
print("uuid: ", uuid)
d = {'id': id_, 'text': texts, "text_type": text_type} # 绑定文本和query id
load_request_path = './request_data_logs/{}.json'.format(id_)
with open(load_request_path, 'w', encoding='utf8') as f2:
# ensure_ascii=False才能输入中文,否则是Unicode字符
# indent=2 JSON数据的缩进,美观
json.dump(d, f2, ensure_ascii=False, indent=4)
redis_.rpush(db_key_query, json.dumps({"id": id_, "path": load_request_path})) # 加入redis
redis_.sadd(db_key_querying, id_)
return_text = {"texts": {'id': id_, }, "probabilities": None, "status_code": 200}
print("ok")
else:
return_text = {"texts": "输入格式应该为字典", "probabilities": None, "status_code": 401}
except:
return_text = {"texts": "项目出错", "probabilities": None, "status_code": 402}
smtp_f("drop_weight_rewrite")
print(request.remote_addr)
texts = request.json["texts"]
text_type = request.json["text_type"]
if texts is None:
return_text = {"texts": "输入了空值", "probabilities": None, "status_code": 402}
return jsonify(return_text)
if isinstance(texts, dict):
id_ = str(uuid.uuid1()) # 为query生成唯一标识
print("uuid: ", uuid)
d = {'id': id_, 'text': texts, "text_type": text_type} # 绑定文本和query id
load_request_path = './request_data_logs/{}.json'.format(id_)
with open(load_request_path, 'w', encoding='utf8') as f2:
# ensure_ascii=False才能输入中文,否则是Unicode字符
# indent=2 JSON数据的缩进,美观
json.dump(d, f2, ensure_ascii=False, indent=4)
redis_.rpush(db_key_query, json.dumps({"id": id_, "path": load_request_path})) # 加入redis
redis_.sadd(db_key_querying, id_)
redis_.sadd(db_key_queryset, id_)
return_text = {"texts": {'id': id_, }, "probabilities": None, "status_code": 200}
print("ok")
else:
return_text = {"texts": "输入格式应该为字典", "probabilities": None, "status_code": 401}
return jsonify(return_text) # 返回结果

2
run_app_nohub_search_redis.sh

@ -1 +1 @@
nohup python redis_check_uuid.py > myout.redis_check_uuid.logs 2>&1 &
nohup python redis_check_uuid_mistral.py > myout.redis_check_uuid_mistral.logs 2>&1 &

Loading…
Cancel
Save