# -*- coding: utf-8 -*-
"""
@Time : 2022/8/15 15:20
@Author :
@FileName:
@Software:
@Describe:
"""

import json

import numpy as np
import pandas as pd
from tqdm import tqdm

from bert4keras.backend import keras, K
from bert4keras.layers import Loss
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import SpTokenizer
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, open
from bert4keras.snippets import DataGenerator, AutoRegressiveDecoder
from keras.models import Model

from rouge import Rouge  # pip install rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

# Note: json, numpy and most of the bert4keras/keras imports above are not used in
# this evaluation script directly; they presumably support the `autotitle` generator
# referenced in Evaluator.evaluate, which is defined elsewhere in the project.


class Evaluator(keras.callbacks.Callback):
    """Evaluation and checkpoint-saving callback."""

    def __init__(self):
        super(Evaluator, self).__init__()
        self.rouge = Rouge()
        self.smooth = SmoothingFunction().method1
        self.best_bleu = 0.

    # def on_epoch_end(self, epoch, logs=None):
    #     metrics = self.evaluate(valid_data)  # evaluate the model
    #     if metrics['bleu'] > self.best_bleu:
    #         self.best_bleu = metrics['bleu']
    #         model.save_weights('./best_model.weights')  # save the best weights
    #     metrics['best_bleu'] = self.best_bleu
    #     print('valid_data:', metrics)

    def evaluate(self, data, topk=1):
        total = 0
        rouge_1, rouge_2, rouge_l, bleu = 0, 0, 0, 0
        for title, content in tqdm(data):
            total += 1
            title = ' '.join(title).lower()
            # `autotitle` is the AutoRegressiveDecoder-based generator defined
            # elsewhere in the project.
            pred_title = ' '.join(autotitle.generate(content, topk=topk)).lower()
            if pred_title.strip():
                scores = self.rouge.get_scores(hyps=pred_title, refs=title)
                rouge_1 += scores[0]['rouge-1']['f']
                rouge_2 += scores[0]['rouge-2']['f']
                rouge_l += scores[0]['rouge-l']['f']
                bleu += sentence_bleu(
                    references=[title.split(' ')],
                    hypothesis=pred_title.split(' '),
                    smoothing_function=self.smooth
                )
        rouge_1 /= total
        rouge_2 /= total
        rouge_l /= total
        bleu /= total
        return {
            'rouge-1': rouge_1,
            'rouge-2': rouge_2,
            'rouge-l': rouge_l,
            'bleu': bleu,
        }

    def evaluate_t(self, data_1, data_2, topk=1):
        """Score a single pair: data_1 is the generated text, data_2 the reference
        (both space-tokenized). Returns [rouge-1, rouge-2, rouge-l, bleu]."""
        total = 0
        rouge_1, rouge_2, rouge_l, bleu = 0, 0, 0, 0

        scores = self.rouge.get_scores(hyps=[data_1], refs=[data_2])
        rouge_1 += scores[0]['rouge-1']['f']
        rouge_2 += scores[0]['rouge-2']['f']
        rouge_l += scores[0]['rouge-l']['f']
        # As in evaluate() above: the reference tokens go to `references`,
        # the generated tokens to `hypothesis`.
        bleu += sentence_bleu(
            references=[data_2.split(' ')],
            hypothesis=data_1.split(' '),
            smoothing_function=self.smooth
        )
        # rouge_1 /= total
        # rouge_2 /= total
        # rouge_l /= total
        # bleu /= total
        return [rouge_1, rouge_2, rouge_l, bleu]

eval_class = Evaluator()
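
# The script below reads the test spreadsheet, treats columns 2..-2 of each row as
# generated texts from different model variants and the last column as the reference
# text (matching the evaluate_t call), and accumulates per-variant ROUGE/BLEU scores
# that are written to a CSV at the end.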

# print(eval_class.evaluate_t("星 辰 的 话", "星 辰 的 话 :"))

path = "data/一万字小说测试效果.xlsx"
path_out = "data/一万字小说测试效果测评.csv"
data = pd.read_excel(path).values.tolist()

# One accumulator slot per generated-text column (11 model variants; see the row
# labels passed to pd.DataFrame below).
data_new = {"rouge_1": [0] * 11,
            "rouge_2": [0] * 11,
            "rouge_l": [0] * 11,
            "bleu": [0] * 11}
total = 0

for i in data:
    # Columns 2 .. second-to-last hold the generated texts; the last column is the
    # reference passed to evaluate_t.
    dan_list = i[2:-1]
    for j in range(len(dan_list)):
        eval_list = eval_class.evaluate_t(' '.join(dan_list[j]), ' '.join(i[-1]))
        data_new["rouge_1"][j] += eval_list[0]
        data_new["rouge_2"][j] += eval_list[1]
        data_new["rouge_l"][j] += eval_list[2]
        data_new["bleu"][j] += eval_list[3]

'''
Generated-text column order in the source Excel file (the same labels are reused as
row names for the output CSV below):
生成文本(t5_未修正数据) 生成文本(unilm未修正数据) 生成文本(unilm修正数据) 生成文本(unilm修正数据_预训练) 生成文本(240w/24H) 生成文本(240W/48H) 生成文本(240W/24H/修) 生成文本(全部数据/72H/修) 生成文本(全部数据/72H/未修) 生成文本(t5修正数据) 生成文本(t5修正数据_190epoch)
'''

pd.DataFrame(data_new,
             index=["生成文本(t5_未修正数据)", "生成文本(unilm未修正数据)", "生成文本(unilm修正数据)",
                    "生成文本(unilm修正数据_预训练)", "生成文本(240w/24H)", "生成文本(240W/48H)",
                    "生成文本(240W/24H/修)", "生成文本(全部数据/72H/修)", "生成文本(全部数据/72H/未修)",
                    "生成文本(t5修正数据)", "生成文本(t5修正数据_190epoch)"]).to_csv(path_out)