You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
44 lines
2.4 KiB
44 lines
2.4 KiB
![]()
2 years ago
|
"""Benchmark a local OpenBuddy-LLaMA-7B fine-tune with vLLM.

Runs one Chinese thesis-section-writing prompt through the model, prints
each generated text, and reports elapsed time and generation speed.

NOTE(review): speed is measured in *characters* per second
(``len(generated_text)``), not model tokens — the original printed it as
"tokens/s", which was misleading.
"""

import time

from vllm import LLM, SamplingParams

# Prompt: given a thesis title and outline, expand subsection
# "3.3 完善村官管理制度" to roughly 1500 characters.
prompts = [
    "生成论文小标题内容#问:论文题目是“大学生村官管理研究”,目录是“一、大学生村官管理现状分析\\n1.1 村官数量及分布情况\\n1.2 村官岗位设置及职责\\n1.3 村官工作绩效评估\\n\\n二、大学生村官管理存在的问题\\n2.1 村官队伍结构不合理\\n2.2 村官工作能力不足\\n2.3 村官管理制度不健全\\n\\n三、大学生村官管理对策研究\\n3.1 加强村官队伍建设\\n3.2 提高村官工作能力\\n3.3 完善村官管理制度\\n\\n四、大学生村官管理案例分析\\n4.1 案例一:某村大学生村官工作情况分析\\n4.2 案例二:某村大学生村官管理策略探讨\\n\\n五、大学生村官管理的未来发展趋势\\n5.1 多元化村官队伍建设\\n5.2 信息化村官管理模式\\n5.3 村官职业化发展\\n\\n六、大学生村官管理的政策建议\\n6.1 加强对大学生村官的培训和管理\\n6.2 完善大学生村官管理制度\\n6.3 提高大学生村官的待遇和福利\\n\\n七、结论与展望”,请把其中的小标题“3.3 完善村官管理制度”的内容补充完整,补充内容字数在1500字左右\n答:\n"
]

# Sampling configuration: fairly high temperature with nucleus sampling and
# a presence penalty to discourage repetition; generation stops at "</s>".
sampling_params = SamplingParams(
    temperature=0.95,
    top_p=0.7,
    presence_penalty=0.9,
    stop="</s>",
    max_tokens=2048,
)

MODELS_PATH = "/home/majiahui/project/models-llm/openbuddy-llama-7b-finetune"

SEPARATOR = "=" * 80  # consistent width (original mixed 80- and 81-char rules)


def main() -> None:
    """Load the model, generate for all prompts, and print timing stats."""
    llm = LLM(model=MODELS_PATH, tokenizer_mode="slow")

    t1 = time.time()
    outputs = llm.generate(prompts, sampling_params)

    # Character count of all generated text — used as the throughput numerator.
    char_count = 0
    for i, output in enumerate(outputs):
        generated_text = output.outputs[0].text
        char_count += len(generated_text)
        print(SEPARATOR)
        print(i)
        print(SEPARATOR)
        print(f"Generated text: {generated_text}")

    t2 = time.time()
    time_cost = t2 - t1
    # Compute the speed once (the original computed and printed it twice).
    speed = char_count / time_cost
    print(time_cost)
    print(f"speed: {speed} chars/s")


if __name__ == "__main__":
    main()