From 062b5f7daf95543d5a78c65edaaaa8e92a432d59 Mon Sep 17 00:00:00 2001
From: "majiahui@haimaqingfan.com"
Date: Fri, 24 Nov 2023 16:30:23 +0800
Subject: [PATCH] Interface combinations needed by the AI creative factory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 main.py | 275 ++++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 185 insertions(+), 90 deletions(-)

diff --git a/main.py b/main.py
index e53a734..9602e93 100644
--- a/main.py
+++ b/main.py
@@ -15,9 +15,11 @@ app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
 RE_CHINA_NUMS = "[1-9].(.*)"
 # 允许的文件类型
 ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
+fenhao_list = ["；", ";"]

-prompt_picture = {
+
+prompt_picture_dict = {
     "1": "图中的商品:{},有什么突出亮点和卖点,请分条列举出来,要求亮点或者卖点要用一个词总结,冒号后面在进行解释,例如:1. 时尚黑色:图中的鞋子是黑色的,符合时尚潮流,适合不同场合的穿搭。",
     "2": "图中的商品:{},有什么亮点,写一段营销话语",
     "3": "图中的商品:{},有以下亮点:\n{}\n根据这些优势亮点,写一段营销文本让商品卖的更好",
     "4": "图中的商品:{},有哪些不足之处可以改进?",
@@ -26,14 +28,13 @@ prompt_picture = {
     "5": "图中的商品:{}的渲染图做哪些调整可以更吸引消费者",
     "6": "根据图中的商品:{},生成一个商品名称,要求商品名称格式中包含的信息(有品牌名,有产品名,有细分产品种类词,比如猫砂,篮球鞋等,有三到五个卖点和形容词)",
 }
-# prompt_text = {
-# "1": "图中{}有什么突出亮点,请列举出来",
-# "2": "图中{}有什么亮点,写一段营销话语",
-# "3": "图中{}有以下亮点:\n{}\n根据这些优势亮点,写一段营销文本让商品买的更好",
-# "4": "图中{}有哪些不足之处可以改进?",
-# "5": "图中{}的渲染图做哪些调整可以更吸引消费者",
-# "5": "图中{}的渲染图做哪些调整可以更吸引消费者",
-# }
+prompt_text_dict = {
+    "1": "",
+    "2": "User:商品名称:{};卖点:{},请帮我生成一个有很多活泼表情的小红书文案,以商品使用者角度来写作,让人感觉真实\nAssistant:",
+    "3": "图中{}有以下亮点:\n{}\n根据这些优势亮点,写一段营销文本让商品买的更好",
+    "4": "图中{}有哪些不足之处可以改进?",
+    "5": "图中{}的渲染图做哪些调整可以更吸引消费者",
+}


 def dialog_line_parse(url, text):
@@ -88,7 +89,7 @@ def allowed_file(filename):
     return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


-def picyure_model_predict(image_path, prompt):
+def picture_model_predict(image_path, prompt):
     # query = tokenizer.from_list_format([
     #     {'image': image_path},
     #     {'text': prompt},
@@ -106,16 +107,35 @@ def picyure_model_predict(image_path, prompt):
     return result


-def picture_main(path_list, commodity, type, additional):
-    if type == "1":
-        result_list_len = False
-        dan_result_geshi = True
-        dan_result_geshi_maohao = True
+def text_model_predict(prompt):
+    # query = tokenizer.from_list_format([
+    #     {'image': image_path},
+    #     {'text': prompt},
+    # ])
+    #
+    # response, history = model.chat(tokenizer, query=query, history=None)
+    # return response
+
+    url = "http://192.168.31.74:12000/predict"
+    data = {
+        "texts": prompt,
+    }
+    result = dialog_line_parse(url, data)["data"]
+    return result
+
+

-        prompy_text = prompt_picture[type]
-        prompy_text = prompy_text.format(commodity)
+def type_1(path_list, commodity, input_type):
+    code = 200
+    result_list_len = False
+    dan_result_geshi = True
+    dan_result_geshi_maohao = True
+    return_list = []
+    prompy_text = prompt_picture_dict[input_type]
+    prompy_text = prompy_text.format(commodity)
+    for path in path_list:
         while True:
-            result = picyure_model_predict(path_list[0], prompy_text)
+            result = picture_model_predict(path, prompy_text)
             result_list = str(result).split("\n")
             result_list = [i for i in result_list if i != ""]
             if len(result_list) > 3:
@@ -137,79 +157,154 @@ def picture_main(path_list, commodity, type, additional):
                 guanjianci = response_re[0].split(":")
                 maidian_list.append([i, guanjianci])
-        return maidian_list
+        return_list.append(maidian_list)
+
+    return code, return_list
+
+def type_2(path_list, commodity, input_type, additional):
+    code = 200
+    return_list = []
+    return_1_data = type_1(path_list, commodity, "1")
+    maidian = [i[0][1][0] for i in return_1_data]
+
+    fenhao = ""
+    if additional != "":
+        for i in fenhao_list:
+            if i in additional:
+                fenhao = i
+                break
+
+    if fenhao == "":
+        return code, []
+
+    maidian_user = [i for i in additional.split(fenhao) if i != ""]
+    maidian += maidian_user
+
+    prompt_text = prompt_text_dict[input_type].format(commodity, "、".join(maidian))
+    result = text_model_predict(prompt_text)
+    return_list.append(result)
+    return code, return_list

-    elif type == "2":
-        prompy_text = prompt_picture[type]
-        prompy_text = prompy_text.format(commodity, additional)
-        return_list = []
-        for path in path_list:
-            result = picyure_model_predict(path, prompy_text)
-            return_list.append(result)
-        return return_list
+def type_3(path_list, commodity, input_type, additional):
+    code = 200
+    return_list = []
+    return_1_data = type_1(path_list, commodity, "1")
+    maidian = [i[0][1][0] for i in return_1_data]
+
+    fenhao = ""
+    if additional != "":
+        for i in fenhao_list:
+            if i in additional:
+                fenhao = i
+                break
+
+    if fenhao == "":
+        return code, []
+
+    maidian_user = [i for i in additional.split(fenhao) if i != ""]
+    maidian += maidian_user
+
+    prompt_text = prompt_text_dict[input_type].format(commodity, "、".join(maidian))
+    result = text_model_predict(prompt_text)
+    return_list.append(result)
+    return code, return_list
+
+def type_4(path_list, commodity, input_type, additional):
+    code = 200
+    return_list = []
+    return_1_data = type_1(path_list, commodity, "1")
+    maidian = [i[0][1][0] for i in return_1_data]
+
+    fenhao = ""
+    if additional != "":
+        for i in fenhao_list:
+            if i in additional:
+                fenhao = i
+                break
+
+    if fenhao == "":
+        return code, []
+
+    maidian_user = [i for i in additional.split(fenhao) if i != ""]
+    maidian += maidian_user
+
+    prompt_text = prompt_text_dict[input_type].format(commodity, "、".join(maidian))
+    result = text_model_predict(prompt_text)
+    return_list.append(result)
+    return code, return_list
+
+def type_5(path_list, commodity, input_type):
+    code = 200
+    return_list = []
+    prompy_text = prompt_picture_dict[input_type]
+    prompy_text = prompy_text.format(commodity)
+    result_list_type = False
+
+    for path in path_list:
+        while True:
+            if result_list_type == True:
+                break
+            result = picture_model_predict(path, prompy_text)
+            result_list = str(result).split("\n")
+            result_list = [i for i in result_list if i != ""]
+            result_list_new = []
+            for i in result_list:
+                response_re = re.findall(RE_CHINA_NUMS, i)
+                if response_re == []:
+                    continue
+                else:
+                    result_list_new.append(i)
+            if result_list_new != []:
+                result_list_type = True
+                return_list.append(result_list_new)
+
+    return code, return_list
+
+def type_6(path_list, commodity, input_type):
+    code = 200
+    return_list = []
+    commodity_list = []
+    prompy_text = prompt_picture_dict[input_type]
+    prompy_text = prompy_text.format(commodity)
+    for path in path_list:
+        for i in range(5):
+            result = picture_model_predict(path, prompy_text)
+            commodity_list.append(result)
+        return_list.append(commodity_list)
+    return code, return_list
+
+
+def type_7(path_list, additional):
+    code = 200
+    prompy_text = additional
+    return_list = []
+    for path in path_list:
+        result = picture_model_predict(path, prompy_text)
+        return_list.append(result)
+    return code, return_list
+
+
+def picture_main(path_list, commodity, input_type, additional):
+    if input_type == "1":
+        return type_1(path_list, commodity, input_type)
+
+    elif input_type == "2":
+        return type_2(path_list, commodity, input_type, additional)
 #
-    elif type == "3":
-        prompy_text = prompt_picture[type]
-        prompy_text = prompy_text.format(commodity, additional)
-        return_list = []
-        for path in path_list:
-            result = picyure_model_predict(path, prompy_text)
-            return_list.append(result)
-        return return_list
+    elif input_type == "3":
+        return type_3(path_list, commodity, input_type, additional)
 #
-    elif type == "4":
-        prompy_text = prompt_picture[type]
-        prompy_text = prompy_text.format(commodity, additional)
-        return_list = []
-        for path in path_list:
-            for i in range(5):
-                result = picyure_model_predict(path, prompy_text)
-                return_list.append(result)
-        return return_list
-
-    elif type == "5":
-        return_list = []
-        prompy_text = prompt_picture[type]
-        prompy_text = prompy_text.format(commodity)
-        result_list_type = False
-
-        for path in path_list:
-            while True:
-                if result_list_type == True:
-                    break
-                result = picyure_model_predict(path, prompy_text)
-                result_list = str(result).split("\n")
-                result_list = [i for i in result_list if i != ""]
-                result_list_new = []
-                for i in result_list:
-                    response_re = re.findall(RE_CHINA_NUMS, i)
-                    if response_re == []:
-                        continue
-                    else:
-                        result_list_new.append(i)
-                if result_list_new != []:
-                    result_list_type = True
-                    return_list.append(result_list_new)
-
-        return return_list
-
-    elif type == "6":
-        return_list = []
-        prompy_text = prompt_picture[type]
-        prompy_text = prompy_text.format(commodity)
-        for path in path_list:
-            for i in range(5):
-                result = picyure_model_predict(path, prompy_text)
-                return_list.append(result)
-        return return_list
-
-    elif type == "7":
-        prompy_text = additional
-        return_list = []
-        for path in path_list:
-            result = picyure_model_predict(path, prompy_text)
-            return_list.append(result)
-        return return_list
+    elif input_type == "4":
+        return type_4(path_list, commodity, input_type, additional)
+
+    elif input_type == "5":
+        return type_5(path_list, commodity, input_type)
+
+    elif input_type == "6":
+        return type_6(path_list, commodity, input_type)
+
+    elif input_type == "7":
+        return type_7(path_list, additional)

     else:
         return "1111"
@@ -267,9 +362,9 @@ def upload_file():
         result = []
         for type_dan in type_list:
             print("type:", type_dan)
-            result_dan = picture_main(path_list, commodity, type_dan, additional)
+            code, result_dan = picture_main(path_list, commodity, type_dan, additional)
             result.append(result_dan)
-        return_text = {"texts": result, "probabilities": None, "status_code": 200}
+        return_text = {"texts": result, "probabilities": None, "status_code": code}
     except:
         return_text = {"texts": "运算出错", "probabilities": None, "status_code": 400}
     log.log('start at',
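
For reference, a minimal client sketch for the text-generation service that the new text_model_predict() wraps. It assumes the endpoint at http://192.168.31.74:12000/predict accepts a JSON body of the form {"texts": <prompt>} and returns the generated text under "data", which is the shape used in this patch; the requests dependency, the helper name predict_text, and the sample product data are illustrative assumptions rather than part of the patch.

import requests

PREDICT_URL = "http://192.168.31.74:12000/predict"

def predict_text(prompt):
    # POST the prompt in the {"texts": ...} shape used by text_model_predict()
    # and return the generated text from the "data" field of the response.
    resp = requests.post(PREDICT_URL, json={"texts": prompt}, timeout=60)
    resp.raise_for_status()
    return resp.json()["data"]

if __name__ == "__main__":
    # Same template as prompt_text_dict["2"]: a product name plus "、"-joined selling points.
    prompt = "User:商品名称:{};卖点:{},请帮我生成一个有很多活泼表情的小红书文案,以商品使用者角度来写作,让人感觉真实\nAssistant:".format(
        "某品牌篮球鞋", "、".join(["轻便", "透气"])
    )
    print(predict_text(prompt))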
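
A note on the repeated selling-point handling: type_1() returns a (code, per-image list) pair, while type_2/type_3/type_4 iterate its raw return value and then each repeat the same split of the user-supplied additional text on a separator from fenhao_list. A possible shared helper is sketched below under those assumptions; the name build_maidian is illustrative only, and it is meant to live in main.py next to type_1 and fenhao_list.

def build_maidian(path_list, commodity, additional):
    # type_1 returns (code, per_image); per_image holds, for each image, the
    # [line, [keyword, explanation]] pairs it extracted, so unpack before indexing
    # and take the first keyword of each image, as the current list comprehension does.
    _, per_image = type_1(path_list, commodity, "1")
    maidian = [items[0][1][0] for items in per_image]

    # Same separator search as the current type_2/3/4 bodies.
    fenhao = ""
    if additional != "":
        for sep in fenhao_list:
            if sep in additional:
                fenhao = sep
                break
    if fenhao == "":
        return None  # callers keep the existing behaviour and return (code, [])

    maidian += [m for m in additional.split(fenhao) if m != ""]
    return maidian

With such a helper, each of type_2/3/4 would reduce to formatting its prompt_text_dict template with the merged list and calling text_model_predict().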