From c26b5750f0a0d0b57b0c6d8a76cb81b279b4dd0b Mon Sep 17 00:00:00 2001
From: "majiahui@haimaqingfan.com"
Date: Wed, 7 Aug 2024 16:44:04 +0800
Subject: [PATCH] Optimize code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 flask_drop_rewrite_request.py | 301 +++++++++++++++++++++++++++++++++---------
 1 file changed, 236 insertions(+), 65 deletions(-)

diff --git a/flask_drop_rewrite_request.py b/flask_drop_rewrite_request.py
index c6ae84c..1e81920 100644
--- a/flask_drop_rewrite_request.py
+++ b/flask_drop_rewrite_request.py
@@ -1,4 +1,3 @@
-# -*- encoding:utf-8 -*-
 import os
 from flask import Flask, jsonify
 from flask import request
@@ -12,6 +11,9 @@ import re
 import logging
 import concurrent.futures
 import socket
+from sentence_spliter.logic_graph_en import long_cuter_en
+from sentence_spliter.automata.state_machine import StateMachine
+from sentence_spliter.automata.sequence import EnSequence  # English sentence sequence


 logging.basicConfig(level=logging.DEBUG,  # log level printed to the console
@@ -42,6 +44,8 @@ pantten_biaoti_0 = '^[1-9一二三四五六七八九ⅠⅡⅢⅣⅤⅥⅦⅧⅨ]
 pantten_biaoti_1 = '^第[一二三四五六七八九]章\s{0,}?[\u4e00-\u9fa5a-zA-Z]+'
 pantten_biaoti_2 = '^[0-9.]+\s{0,}?[\u4e00-\u9fa5a-zA-Z]+'
 pantten_biaoti_3 = '^[((][1-9一二三四五六七八九ⅠⅡⅢⅣⅤⅥⅦⅧⅨ][)_)][、.]{0,}?\s{0,}?[\u4e00-\u9fa5a-zA-Z]+'
+pantten_biaoti_4 = '(摘要)'
+pantten_biaoti_5 = '(致谢)'


 def get_host_ip():
@@ -133,7 +137,6 @@ def dialog_line_parse(url, text):
         print("【{}】 Failed to get a proper response from remote "
               "server. Status Code: {}. Response: {}"
               "".format(url, response.status_code, response.text))
-        print(text)
         return {}

@@ -266,17 +269,23 @@ def uuid_search_mp(results):
         result = uuid_search(uuid)

         if result["code"] == 200:
-            results_list[i] = result["text"]
+            if result["text"] != "":
+                results_list[i] = result["text"]
+            else:
+                results_list[i] = "Empty character"
         time.sleep(3)
     return results_list


-def get_multiple_urls(urls):
+def get_multiple_urls(text_info):
     input_values = []
+    input_index = []

-    for i in urls:
-        input_values.append(i[1])
+    for i in range(len(text_info)):
+        if text_info[i][3]:
+            input_values.append(text_info[i][4])
+            input_index.append(i)
     with concurrent.futures.ThreadPoolExecutor(100) as executor:
         # call the worker function concurrently via map
         results_1 = list(executor.map(request_api_chatgpt, input_values))
@@ -285,15 +294,25 @@ def get_multiple_urls(urls):
         # call the worker function concurrently via map
         results = list(executor.map(uuid_search_mp, [results_1]))

-    return_list = []
-    for i,j in zip(urls, results[0]):
-        return_list.append([i, j])
-    return return_list
+    # return_list = []
+    # for i,j in zip(urls, results[0]):
+    #     return_list.append([i, j])
+    return_dict = {}
+    for i, j in zip(input_index, results[0]):
+        return_dict[i] = j
+
+    for i in range(len(text_info)):
+        if i in return_dict:
+            text_info[i].append(return_dict[i])
+        else:
+            text_info[i].append(text_info[i][0])
+    return text_info
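Note on the new data shape: get_multiple_urls no longer receives (index, prompt)
pairs; it takes the full list of positional text_info records and hands the same
list back with one element appended per record. A minimal sketch of that
contract, with illustrative values (the field meanings are inferred from how
main() builds the records later in this patch; none of the sample values below
are from the source):

    # [sentence, sentence_id, short_sentence_index, if_change, prompt]
    text_info = [
        ["这是第一句。", "0", 0, True, "User: ...\nAssistant:"],
        ["摘要", "0", 1, False, "User: Hello\nAssistant:"],
    ]
    text_info = get_multiple_urls(text_info)
    # Only records with if_change set were sent to the rewrite API; every
    # record now carries a sixth element: the API result for rewritten
    # sentences, or a copy of the original sentence (index 0) otherwise.
    assert all(len(record) == 6 for record in text_info)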
" + spilt_sen_len = 1e9 + is_chinese = False + else: + spilt_word = "。" + spilt_sen_len = 120 + is_chinese = True + + # 存放整理完的数据 sentence_batch_list = [] - sentence_batch_one = [] - sentence_batch_length = 0 - return_list = [] - for sentence in sentence_list: - if sentence != "": - dan_sentence_list = [i for i in str(sentence).split("。") if i != ""] + if is_chinese == False: + __long_machine_en = StateMachine(long_cuter_en(max_len=25, min_len=3)) + m_input = EnSequence(text) + __long_machine_en.run(m_input) + for v in m_input.sentence_list(): + sentence_batch_list.append([v, snetence_id, 0]) - if len(dan_sentence_list) <= 3: - sentence_batch_list.append([sentence, snetence_id, 0]) + else: + sentence_list = text.split(spilt_word) + # sentence_list_new = [] + # for i in sentence_list: + # if i != "": + # sentence_list_new.append(i) + # sentence_list = sentence_list_new + sentence_batch_length = 0 + + for sentence in sentence_list[:-1]: + if len(sentence) < spilt_sen_len: + sentence_batch_length += len(sentence) + sentence_batch_list.append([sentence + spilt_word, snetence_id, 0]) # sentence_pre = autotitle.gen_synonyms_short(sentence) # return_list.append(sentence_pre) else: - shot_sen = 0 - start = 0 - for end in range(3, len(dan_sentence_list), 3): - sentence_batch_list.append(["。".join(dan_sentence_list[start: end]) + "。", snetence_id, shot_sen]) - start = end - shot_sen += 1 - - sentence_batch_list.append(["。".join(dan_sentence_list[start: len(dan_sentence_list)]) + "。", snetence_id, shot_sen]) + sentence_split_list = chulichangju_1(sentence, snetence_id, [], 0) + for sentence_short in sentence_split_list[:-1]: + sentence_batch_list.append(sentence_short) + sentence_split_list[-1][0] = sentence_split_list[-1][0] + spilt_word + sentence_batch_list.append(sentence_split_list[-1]) + + if sentence_list[-1] != "": + if len(sentence_list[-1]) < spilt_sen_len: + sentence_batch_length += len(sentence_list[-1]) + sentence_batch_list.append([sentence_list[-1], snetence_id, 0]) + # sentence_pre = autotitle.gen_synonyms_short(sentence) + # return_list.append(sentence_pre) + else: + sentence_split_list = chulichangju_1(sentence_list[-1], snetence_id, [], 0) + for sentence_short in sentence_split_list: + sentence_batch_list.append(sentence_short) return sentence_batch_list @@ -343,6 +385,7 @@ def paragraph_test(texts: dict): return text_new + def batch_predict(batch_data_list): ''' 一个bacth数据预测 @@ -371,7 +414,6 @@ def is_chinese(char): def predict_data_post_processing(text_list): - print("text_list", text_list) text_list_sentence = [] # text_list_sentence.append([text_list[0][0], text_list[0][1]]) @@ -405,7 +447,7 @@ def predict_data_post_processing(text_list): # # text_list.extend(i) # # return_list = predict_data_post_processing(text_list) # # return return_list -def post_sentence_ulit(sentence, text_info): +def post_sentence_ulit(text_info): ''' 后处理 :param sentence: @@ -415,24 +457,87 @@ def post_sentence_ulit(sentence, text_info): # generated_text_list[i] = post_sentence_ulit(generated_text_list[i]) # else: # generated_text_list[i] = text_list[i][0] - - sentence = sentence.strip("\n").strip(" ") - return sentence + if_change = text_info[3] + + if if_change == True: + sentence = text_info[-1].strip() + if "改写后:" in sentence: + sentence_lable_index = sentence.index("改写后:") + sentence = sentence[sentence_lable_index + 4:] + # if sentence[-1] == "\n": + # sentence = sentence[:-1] + + sentence = sentence.strip("\n") + # if sentence[-1] != text_info[0][-1]: + # if is_chinese(text_info[0][-1]) == True: + 
@@ -343,6 +385,7 @@ def paragraph_test(texts: dict):

     return text_new

+
 def batch_predict(batch_data_list):
     '''
     Predict one batch of data
@@ -371,7 +414,6 @@ def is_chinese(char):


 def predict_data_post_processing(text_list):
-    print("text_list", text_list)
     text_list_sentence = []
     # text_list_sentence.append([text_list[0][0], text_list[0][1]])

@@ -405,7 +447,7 @@ def predict_data_post_processing(text_list):
 # #     text_list.extend(i)
 # #     return_list = predict_data_post_processing(text_list)
 # #     return return_list
-def post_sentence_ulit(sentence, text_info):
+def post_sentence_ulit(text_info):
     '''
     Post-processing
     :param sentence:
@@ -415,24 +457,87 @@ def post_sentence_ulit(sentence, text_info):
     # generated_text_list[i] = post_sentence_ulit(generated_text_list[i])
     # else:
     # generated_text_list[i] = text_list[i][0]
-
-    sentence = sentence.strip("\n").strip(" ")
-    return sentence
+    if_change = text_info[3]
+
+    if if_change:
+        sentence = text_info[-1].strip()
+        if "改写后:" in sentence:
+            sentence_lable_index = sentence.index("改写后:")
+            sentence = sentence[sentence_lable_index + 4:]
+        # if sentence[-1] == "\n":
+        #     sentence = sentence[:-1]
+
+        sentence = sentence.strip("\n")
+        # if sentence[-1] != text_info[0][-1]:
+        #     if is_chinese(text_info[0][-1]) == True:
+        #         if is_chinese(sentence[-1]) != True:
+        #             sentence = sentence[:-1]
+        #         else:
+        #             sentence = sentence[:-1] + text_info[0][-1]
+    else:
+        sentence = text_info[0]
+    return text_info[:4] + [sentence]


 def has_chinese(s):
     return bool(re.search('[\u4e00-\u9fa5]', s))


+def english_ulit(sentence):
+    sentence = str(sentence).strip()
+    if_change = False
+
+    # sentence-length check; note that '"" != ""' is always false, so the two
+    # rewrite prompts below are currently unreachable and every English
+    # sentence falls through to the placeholder branch
+    if "" != "":
+        if sentence[-1] != ".":
+            text = f"User: Rewrite the following sentence fragment, ensuring that the meaning remains similar but with significant changes. The length of the rewritten sentence must be greater, not less. Additionally, the words in the short sentences must connect seamlessly with the preceding and following sentences:\n{sentence}\nAssistant:"
+        else:
+            text = f"User: Rewrite the following sentence, requiring the meaning to be similar but the change to be larger, and the number of words can only be more but not less:\n{sentence}\nAssistant:"
+    else:
+        if_change = False
+        text = f"User: Hello\nAssistant:"
+
+    return text, if_change
+
+
+def chinese_ulit(sentence):
+    max_length = 25
+    sentence = str(sentence).strip()
+    if_change = True
+
+    text = f"User: 任务:降aigc率\n请用偏口语化改写句子,要求改写后的句子与原句差别较大,句子完成重新打乱重新描述,语义上可以有变动,让观点更明确,且内容更丰富,形容词增多,但是不能出现明显的逻辑错误和语法错误,不能有不合理的用词,用词必须符合汉语的常识习惯,但是句子格式和内容变化要大,且必须符合人类的书写习惯,语法上必须要规范,尽量多断句,尽量控制每个短句句子长度,能用同义词替换的部分尽量用同义词替换,在句子开头禁止出现连词,比如“首先”, “其次”等,尽量避免出现模糊表达和不确定性表达,比如“某大学”,“某地”,“某城市”等,改写后的句子长度不能少于原句的,需要改写的句子 “{sentence}”,直返会改写后的句子,不要返回其他内容\nAssistant:"
+
+    # title detection: short headings and the 摘要/致谢 labels are not rewritten
+    result_biaoti_list_0 = re.findall(pantten_biaoti_0, sentence)
+    result_biaoti_list_1 = re.findall(pantten_biaoti_1, sentence)
+    result_biaoti_list_2 = re.findall(pantten_biaoti_2, sentence)
+    result_biaoti_list_3 = re.findall(pantten_biaoti_3, sentence)
+    result_biaoti_list_4 = re.findall(pantten_biaoti_4, sentence.replace(" ", ""))
+    result_biaoti_list_5 = re.findall(pantten_biaoti_5, sentence.replace(" ", ""))
+
+    if list(set(result_biaoti_list_0 + result_biaoti_list_1 + result_biaoti_list_2 + result_biaoti_list_3)) != []:
+        if len(sentence) < max_length:
+            if_change = False
+            return text, if_change
+
+    elif list(set(result_biaoti_list_4 + result_biaoti_list_5)) != []:
+        if_change = False
+        return text, if_change
+
+    return text, if_change
+
+
 def pre_sentence_ulit(sentence):
     '''
     Pre-processing
     :param sentence:
     :return:
     '''
-    sentence = str(sentence).strip()
-    if_change = True
-
-    text = f"User: 任务:降aigc率\n请用偏口语化改写句子,要求改写后的句子与原句差别较大,句子完成重新打乱重新描述,语义上可以有变动,让观点更明确,且内容更丰富,形容词增多,但是不能出现明显的逻辑错误和语法错误,不能有不合理的用词,用词必须符合汉语的常识习惯,但是句子格式和内容变化要大,且必须符合人类的书写习惯,语法上必须要规范,尽量多断句,尽量控制每个短句句子长度,能用同义词替换的部分尽量用同义词替换,在句子开头禁止出现连词,比如“首先”, “其次”等,尽量避免出现模糊表达和不确定性表达,比如“某大学”,“某地”,“某城市”等,改写后的句子长度不能少于原句的,需要改写的句子 “{sentence}”,直返会改写后的句子,不要返回其他内容\nAssistant:"
+    # check whether the sentence contains any Chinese at all
+    if not has_chinese(sentence):
+        text, if_change = english_ulit(sentence)
+    else:
+        text, if_change = chinese_ulit(sentence)

     return text, if_change
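The effect of the title patterns used by chinese_ulit is easier to see in
isolation. A quick check with two of the module-level patterns added near the
top of this patch (the sample strings are illustrative):

    import re

    pantten_biaoti_1 = '^第[一二三四五六七八九]章\s{0,}?[\u4e00-\u9fa5a-zA-Z]+'
    pantten_biaoti_4 = '(摘要)'

    print(re.findall(pantten_biaoti_1, "第一章 绪论"))              # ['第一章 绪论']
    print(re.findall(pantten_biaoti_4, "摘 要".replace(" ", "")))  # ['摘要']

    # chinese_ulit sets if_change = False for headings shorter than 25
    # characters and for the 摘要/致谢 labels, so such lines pass through
    # the pipeline unrewritten.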
@@ -449,40 +554,104 @@ def main(texts: dict):

     # vllm prediction
     for i in text_list:
+        print("sen", i[0])
         text, if_change = pre_sentence_ulit(i[0])
-        text_sentence.append(text)
-        text_info.append([i[0], i[1], i[2], if_change])
-
-    input_data = []
-    for i in range(len(text_sentence)):
-        # input_data.append([i, chatgpt_url, {"texts": text_sentence[i]}])
-        input_data.append([i, text_sentence[i]])
-
-    results = get_multiple_urls(input_data)
-
-    generated_text_list = [""] * len(input_data)
-    for url, result in results:
-        # print(f"Result for {url}: {result}")
-        generated_text_list[url[0]] = result
-
-    for i in range(len(generated_text_list)):
-        generated_text_list[i] = post_sentence_ulit(generated_text_list[i], text_info[i])
-
-    for i, j in zip(generated_text_list, text_info):
-        text_list_new.append([i] + j[1:3])
+        text_info.append([i[0], i[1], i[2], if_change, text])
+
+    # outputs = llm.generate(text_sentence, sampling_params)  # invoke the model
+    #
+    # generated_text_list = [""] * len(text_sentence)
+    #
+    # # generated_text_list = ["" if len(i[0]) > 5 else i[0] for i in text_list]
+    #
+    # for i, output in enumerate(outputs):
+    #     index = output.request_id
+    #     generated_text = output.outputs[0].text
+    #     generated_text_list[int(index)] = generated_text
+
+    ## ============================================================
+    # generated_text_list = dialog_line_parse(
+    #     chatgpt_url,
+    #     {
+    #         "texts": text_sentence
+    #     }
+    # )["data"]
+
+    ## ====================================================================
+
+    # input_data = []
+    # for i in range(len(text_sentence)):
+    #     input_data.append([chatgpt_url, {"texts": text_sentence[i]}])
+    #
+    # with concurrent.futures.ThreadPoolExecutor() as executor:
+    #     # submit each task to the thread pool and keep the Future objects
+    #     futures = [executor.submit(dialog_line_parse, i[0], i[1]) for i in input_data]
+    #
+    #     # gather finished tasks via as_completed and read their return values
+    #     results = [future.result() for future in concurrent.futures.as_completed(futures)]
+    #
+    #     generated_text_list = []
+    #     for dan in results:
+    #         generated_text_list.append(dan["data"])
+
+    ## ==============================================================================================
+
+    # urls = [
+    #     [0, 'http://114.116.25.228:12000/predict', {'texts': '任务:改写句子\n改写下面这句话,要求意思接近但是改动幅度比较大,字数只能多不能少:\n中华苏维埃共和国时期的法律制度建设是中国共产党'}],
+    #     [1, 'http://114.116.25.228:12000/predict', {'texts': '任务:改写句子\n改写下面这句话,要求意思接近但是改动幅度比较大,字数只能多不能少:\n革命道路的实践中,无数革命先辈用鲜血和生命铸就了以坚定信念、求真务实、一心为民、清正廉洁、艰苦奋斗、争创一流、无私奉献等为主要内涵的苏区精神,孕育了'}],
+    #     [2, 'http://114.116.25.228:12000/predict', {'texts': '任务:改写句子\n改写下面这句话,要求意思接近但是改动幅度比较大,字数只能多不能少:\n年前的浙江枫桥干部群众创造了“依靠群众就地化解矛盾”的“枫桥经验”,并根据形势变化不断赋予新的内涵,已成为基层治理的'}],
+    #     [3, 'http://114.116.25.228:12000/predict', {'texts': '任务:改写句子\n改写下面这句话,要求意思接近但是改动幅度比较大,字数只能多不能少:\n新时代,习近平总书记反复强调要坚持和发展新时代“枫桥经验”,加强和创新基层社会治理,切实把矛盾化解在基层,维护好社会稳定。'}],
+    #     [4, 'http://114.116.25.228:12000/predict', {'texts': '任务:改写句子\n改写下面这句话,要求意思接近但是改动幅度比较大,字数只能多不能少:\n一是新时代“枫桥经验”对'}]
+    # ]
+
+    text_info = get_multiple_urls(text_info)
+
+
+    for i in range(len(text_info)):
+        text_info[i] = post_sentence_ulit(text_info[i])
+
+    for i in range(len(text_info)):
+        text_list_new.append([text_info[i][-1]] + text_info[i][1:3])

     return_list = predict_data_post_processing(text_list_new)
     return return_list


+# @app.route('/droprepeat/', methods=['POST'])
+# def sentence():
+#     print(request.remote_addr)
+#     texts = request.json["texts"]
+#     text_type = request.json["text_type"]
+#     print("原始语句" + str(texts))
+#     # question = question.strip('。、!??')
+#
+#     if isinstance(texts, dict):
+#         texts_list = []
+#         y_pred_label_list = []
+#         position_list = []
+#
+#         # texts = texts.replace('\'', '\"')
+#         if texts is None:
+#             return_text = {"texts": "输入了空值", "probabilities": None, "status_code": False}
+#             return jsonify(return_text)
+#         else:
+#             assert text_type in ['focus', 'chapter']
+#             if text_type == 'focus':
+#                 texts_list = main(texts)
+#             if text_type == 'chapter':
+#                 texts_list = main(texts)
+#             return_text = {"texts": texts_list, "probabilities": None, "status_code": True}
+#     else:
+#         return_text = {"texts": "输入格式应该为list", "probabilities": None, "status_code": False}
+#     return jsonify(return_text)
+
+
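The rewritten pipeline above threads every record through post_sentence_ulit
exactly once. A small sketch of that function's contract, with made-up records
(only the 改写后: label handling is taken from the code; the values are
illustrative):

    # if_change is False: the original sentence (index 0) is carried over.
    kept = post_sentence_ulit(["摘要", "0", 0, False, "User: Hello\nAssistant:", "摘要"])
    assert kept[-1] == "摘要"

    # if_change is True: the model output (last element) is stripped, and a
    # leading "改写后:" label is removed when present.
    changed = post_sentence_ulit(["原句。", "0", 0, True, "User: ...", "改写后:重新表述的句子。"])
    assert changed[-1] == "重新表述的句子。"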
 def classify():  # invoke the model; set the maximum batch_size
     while True:
         if redis_.llen(db_key_query) == 0:  # if the queue is empty, keep polling
             time.sleep(3)
             continue
         query = redis_.lpop(db_key_query).decode('UTF-8')  # fetch the query text
-
-        print("query", query)
         data_dict_path = json.loads(query)
         path = data_dict_path['path']
         # text_type = data_dict["text_type"]
@@ -499,7 +668,10 @@ def classify():  # invoke the model; set the maximum batch_size
         if text_type == 'focus':
             texts_list = main(texts)
         elif text_type == 'chapter':
-            texts_list = main(texts)
+            # try:
+            texts_list = main(texts)
+            # except:
+            #     texts_list = []
         else:
             texts_list = []
         if texts_list != []:
@@ -541,7 +713,6 @@ def handle_query():
     d = {'id': id_, 'text': texts, "text_type": text_type}  # bind the text to the query id
     load_request_path = './request_data_logs/{}.json'.format(id_)
-    print(load_request_path)
     with open(load_request_path, 'w', encoding='utf8') as f2:
         # ensure_ascii=False is required to write Chinese characters; otherwise they are escaped as \u sequences
         # indent=2 indents the JSON output for readability
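For context on the queue hand-off: handle_query() persists each request as JSON
under ./request_data_logs/<id>.json, and classify() expects a small pointer
record on the Redis list whose 'path' field names that file. A minimal sketch of
the producer side this loop assumes, where redis_ is the redis.Redis client and
db_key_query the list key defined near the top of the file (the actual push call
falls outside this diff, so the payload shape is inferred from the consumer):

    import json

    payload = {"path": "./request_data_logs/some-id.json"}  # classify() reads data_dict_path['path']
    redis_.rpush(db_key_query, json.dumps(payload))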