Browse Source

更新模型问答方式

master
majiahui@haimaqingfan.com 3 months ago
parent
commit
d3606db8bf
  1. 0
      gunicorn_config_mulu_openbuddy.py
  2. 21
      gunicorn_config_openbuddy.py
  3. 21
      gunicorn_config_openbuddy_model_llama_3_2.py
  4. 11
      gunicorn_config_qwen.py
  5. 196
      main_mulu_openbuddy.py
  6. 273
      main_openbuddy.py
  7. 9
      main_openbuddy_llama_3_2_1b.py
  8. 273
      main_qwen.py
  9. 1
      run_api_gunicorn_mulu_openbuddyl.sh
  10. 1
      run_api_gunicorn_openbuddy.sh
  11. 1
      run_api_gunicorn_openbuddy_llama_3_2.sh
  12. 1
      run_api_gunicorn_qwen.sh
  13. 42
      测试公式提取.py

0
gunicorn_config_mulu_model.py → gunicorn_config_mulu_openbuddy.py

21
gunicorn_config_openbuddy.py

@ -0,0 +1,21 @@
# Number of parallel gunicorn worker processes.
workers = 16
# Bind address and port for this service (change as needed).
bind = '0.0.0.0:12004'
loglevel = 'debug'
worker_class = "gevent"
# Run as a daemon: the server keeps running after the launching shell closes.
daemon = True
# Request timeout in seconds (gunicorn default is 30s).
timeout = 120
# Access / error log paths.
# NOTE(review): 'acess' spelling kept as-is to match existing log files.
accesslog = './logs/acess_openbuddy_model.log'
errorlog = './logs/error_openbuddy_model.log'
# access_log_format = '%(h) - %(t)s - %(u)s - %(s)s %(H)s'
# errorlog = '-' # log to stdout instead
# Maximum concurrent connections per worker.
worker_connections = 20000

21
gunicorn_config_openbuddy_model_llama_3_2.py

@ -0,0 +1,21 @@
# Number of parallel gunicorn worker processes.
workers = 16
# Bind address and port for this service (change as needed).
bind = '0.0.0.0:12014'
loglevel = 'debug'
worker_class = "gevent"
# Run as a daemon: the server keeps running after the launching shell closes.
daemon = True
# Request timeout in seconds (gunicorn default is 30s).
timeout = 120
# Access / error log paths.
# NOTE(review): 'acess' spelling kept as-is to match existing log files.
accesslog = './logs/acess_openbuddy_model_llama_3_2.log'
errorlog = './logs/error_openbuddy_model_llama_3_2.log'
# access_log_format = '%(h) - %(t)s - %(u)s - %(s)s %(H)s'
# errorlog = '-' # log to stdout instead
# Maximum concurrent connections per worker.
worker_connections = 20000

11
gunicorn_config_kto_model.py → gunicorn_config_qwen.py

@ -1,7 +1,7 @@
# 并行工作线程数
workers = 8
workers = 16
# 监听内网端口5000【按需要更改】
bind = '0.0.0.0:12006'
bind = '0.0.0.0:12004'
loglevel = 'debug'
@ -9,13 +9,14 @@ worker_class = "gevent"
# 设置守护进程【关闭连接时,程序仍在运行】
daemon = True
# 设置超时时间120s,默认为30s。按自己的需求进行设置
timeout = 120
timeout = 300
# 设置访问日志和错误信息日志路径
accesslog = './logs/acess_kto_model.log'
errorlog = './logs/error_kto_model.log'
accesslog = './logs/acess.log'
errorlog = './logs/error.log'
# access_log_format = '%(h) - %(t)s - %(u)s - %(s)s %(H)s'
# errorlog = '-' # 记录到标准输出
# 设置最大并发量
worker_connections = 20000

196
main_mulu_openbuddy.py

@ -0,0 +1,196 @@
#coding:utf-8
# 这是一个示例 Python 脚本。
# 按 Shift+F10 执行或将其替换为您的代码。
# 按 双击 Shift 在所有地方搜索类、文件、工具窗口、操作和设置。
# def request_api_chatgpt(api_key, prompt):
# print(api_key)
# print(prompt)
# OPENAI_API_KEY = api_key
# url = "https://api.openai.com/v1/chat/completions"
# # url = "https://one.aiskt.com"
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {OPENAI_API_KEY}"
# }
# data = {
# "model": "gpt-4-turbo-preview",
# "messages": [
# {"role": "user", "content": "你好"},
# {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
# # {"role": "user", "content": prompt}
# {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
# ],
# "top_p": 0.9,
# "temperature": 0.95
# }
# response = requests.post(url,
# headers=headers,
# data=json.dumps(data),
# timeout=1200)
#
# return response
from flask import Flask, jsonify
from flask import request
import requests
import time
import socket
import re
app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False
def get_host_ip():
    """Return the LAN IP address of this machine.

    Opens a UDP socket "towards" a public DNS server (connect() on a UDP
    socket sends no packet) and reads back the local address the OS
    selected for that route.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('8.8.8.8', 80))
        return probe.getsockname()[0]
    finally:
        probe.close()
chatgpt_url_predict = "http://{}:12000/predict".format(str(get_host_ip()))
chatgpt_url_search = "http://{}:12000/search".format(str(get_host_ip()))
def request_api_chatgpt(content, model, top_p, temperature):
    """Submit a generation request to the local model server.

    Returns the decoded JSON payload on HTTP 200; otherwise logs the
    failure to stdout and returns an empty dict.
    """
    payload = {
        "content": content,
        "model": model,
        "top_p": top_p,
        "temperature": temperature,
    }
    response = requests.post(chatgpt_url_predict, json=payload, timeout=100000)
    if response.status_code != 200:
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}
    return response.json()
def uuid_search(uuid):
    """Look up a queued generation result by its task id.

    Returns the decoded JSON payload on HTTP 200; otherwise logs the
    failure to stdout and returns an empty dict.
    """
    response = requests.post(chatgpt_url_search, json={"id": uuid}, timeout=100000)
    if response.status_code != 200:
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}
    return response.json()
def uuid_search_mp(results):
    """Poll the /search endpoint until every submitted task has a result.

    Args:
        results: list of /predict responses, each shaped like
            {"texts": {"id": "<uuid>"}}.

    Returns:
        List of generated texts, in the same order as ``results``.
    """
    results_list = [""] * len(results)
    while not all(results_list):
        for i, submitted in enumerate(results):
            if results_list[i]:
                continue  # already fetched; don't poll this id again
            result = uuid_search(submitted["texts"]["id"])
            # uuid_search returns {} on transport errors; .get() keeps the
            # poll loop alive instead of raising KeyError (original bug).
            if result.get("code") == 200:
                results_list[i] = result["text"]
        time.sleep(3)
    return results_list
@app.route("/predict", methods=["POST"])
def handle_query():
    """Accept an OpenAI-style chat request, run it through the local
    model server, and return an OpenAI-style completion payload."""
    print(request.remote_addr)
    body = request.json
    model = body.get("model")
    messages = body.get("messages")
    top_p = body.get("top_p")
    temperature = body.get("temperature")
    # Wrap the latest user turn in the OpenBuddy chat template.
    content = "<|role|>user<|says|>{}<|end|>\n<|role|>assistant<|says|>".format(
        messages[-1]["content"])
    print(model)
    print(messages)
    print(top_p)
    print(temperature)
    # Submit the prompt, then block until the generated text is ready.
    uid = request_api_chatgpt(content, model, top_p, temperature)
    results = uuid_search_mp([uid])[0]
    return jsonify({
        'code': 200,
        'id': uid["texts"]["id"],
        'object': 0,
        'created': 0,
        'model': 0,
        'choices': [
            {
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': results,
                },
                'logprobs': None,
                'finish_reason': 'stop',
            }
        ],
        'usage': 0,
        'system_fingerprint': 0,
    })
if __name__ == '__main__':
app.run(host="0.0.0.0", port=12005, threaded=True, debug=False)

273
main_openbuddy.py

@ -0,0 +1,273 @@
#coding:utf-8
# 这是一个示例 Python 脚本。
# 按 Shift+F10 执行或将其替换为您的代码。
# 按 双击 Shift 在所有地方搜索类、文件、工具窗口、操作和设置。
# def request_api_chatgpt(api_key, prompt):
# print(api_key)
# print(prompt)
# OPENAI_API_KEY = api_key
# url = "https://api.openai.com/v1/chat/completions"
# # url = "https://one.aiskt.com"
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {OPENAI_API_KEY}"
# }
# data = {
# "model": "gpt-4-turbo-preview",
# "messages": [
# {"role": "user", "content": "你好"},
# {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
# # {"role": "user", "content": prompt}
# {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
# ],
# "top_p": 0.9,
# "temperature": 0.95
# }
# response = requests.post(url,
# headers=headers,
# data=json.dumps(data),
# timeout=1200)
#
# return response
from flask import Flask, jsonify
from flask import request
import requests
import time
import socket
import re
app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False
def get_host_ip():
    """Return the LAN IP address of this machine.

    Opens a UDP socket "towards" a public DNS server (connect() on a UDP
    socket sends no packet) and reads back the local address the OS
    selected for that route.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('8.8.8.8', 80))
        return probe.getsockname()[0]
    finally:
        probe.close()
chatgpt_url_predict = "http://{}:12001/predict".format(str(get_host_ip()))
chatgpt_url_search = "http://{}:12001/search".format(str(get_host_ip()))
def check_problems(input, output):
    """Log cases where the model emitted an artifact the prompt did not ask for.

    Only applies to "generate sub-heading content" tasks.  For each artifact
    kind (display formula, html table, mermaid diagram) that appears in
    ``output`` without the prompt requesting it, a section is appended to
    logs/error_xiaobiaoti.log.  Returns None.

    NOTE(review): ``input``/``output`` shadow builtins; names kept for
    caller compatibility.
    """
    pantten_formula = r'\\\[.*?\\\]'
    pantten_picture = r'<mermaidStart>.*?<mermaidEnd>'
    pantten_tb = r'<tbStart>.*?<tbEnd>'
    # Not a sub-heading generation task: nothing to validate.
    if "任务:生成论文小标题内容" not in input:
        return
    sections = []

    def _record(title):
        # Reproduce the original log section layout exactly.
        sections.append(title)
        sections.append("input:\n")
        sections.append(input)
        sections.append("output:\n")
        sections.append(output)
        sections.append("\n========================================================================\n")

    # Formula present although the prompt never asked for \[...\] blocks.
    if ("数学公式用\\[\\]进行包裹" not in input
            and re.findall(pantten_formula, output, re.DOTALL)):
        _record("多生成公式问题:\n")
    # Table present although the prompt never asked for <tbStart>...<tbEnd>.
    # (The original comment here said "formula" — it checks tables.)
    if ("表格部分开始必须用<tbStart>标识,表格部分结束必须用<tbEnd>标识,必须返回html格式的表格" not in input
            and re.findall(pantten_tb, output, re.DOTALL)):
        _record("多生成表格问题:\n")
    # Diagram present although the prompt never asked for an inline image.
    if ("图片要求在文字中插入一张图" not in input
            and re.findall(pantten_picture, output, re.DOTALL)):
        _record("多生成图片问题:\n")
    if sections:
        # Build the log entry once with join instead of repeated +=.
        with open("logs/error_xiaobiaoti.log", "a", encoding="utf-8") as f:
            f.write("".join(sections))
def return_type(input, output):
    """Classify extra artifacts present in ``output`` for a sub-heading task.

    Returns a list drawn from {"1": table, "2": formula, "3": diagram},
    in that order; empty for any other task or when nothing matched.
    """
    if "任务:生成论文小标题内容" not in input:
        return []
    checks = (
        (r'<tbStart>.*?<tbEnd>', "1"),            # html table block
        (r'\\\[.*?\\\]', "2"),                    # LaTeX display formula
        (r'<mermaidStart>.*?<mermaidEnd>', "3"),  # mermaid diagram
    )
    return [tag for pattern, tag in checks
            if re.findall(pattern, output, re.DOTALL)]
def request_api_chatgpt(content, model, top_p, temperature):
    """Submit a generation request to the local model server.

    Returns the decoded JSON payload on HTTP 200; otherwise logs the
    failure to stdout and returns an empty dict.
    """
    payload = {
        "content": content,
        "model": model,
        "top_p": top_p,
        "temperature": temperature,
    }
    response = requests.post(chatgpt_url_predict, json=payload, timeout=100000)
    if response.status_code != 200:
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}
    return response.json()
def uuid_search(uuid):
    """Look up a queued generation result by its task id.

    Returns the decoded JSON payload on HTTP 200; otherwise logs the
    failure to stdout and returns an empty dict.
    """
    response = requests.post(chatgpt_url_search, json={"id": uuid}, timeout=100000)
    if response.status_code != 200:
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}
    return response.json()
def uuid_search_mp(results):
    """Poll the /search endpoint until every submitted task has a result.

    Args:
        results: list of /predict responses, each shaped like
            {"texts": {"id": "<uuid>"}}.

    Returns:
        List of generated texts, in the same order as ``results``.
    """
    results_list = [""] * len(results)
    while not all(results_list):
        for i, submitted in enumerate(results):
            if results_list[i]:
                continue  # already fetched; don't poll this id again
            result = uuid_search(submitted["texts"]["id"])
            # uuid_search returns {} on transport errors; .get() keeps the
            # poll loop alive instead of raising KeyError (original bug).
            if result.get("code") == 200:
                results_list[i] = result["text"]
        time.sleep(3)
    return results_list
@app.route("/predict", methods=["POST"])
def handle_query():
    """Accept an OpenAI-style chat request, run it through the local
    OpenBuddy server, and return an OpenAI-style completion payload.

    The response also carries ``return_type_list`` describing which extra
    artifacts (table / formula / diagram) were found in the generated text.
    """
    print(request.remote_addr)
    model = request.json.get("model")
    messages = request.json.get("messages")
    top_p = request.json.get("top_p")
    temperature = request.json.get("temperature")
    # Debug trace of the incoming request.  The original printed these
    # four values twice back-to-back; once is enough.
    print(model)
    print(messages)
    print(top_p)
    print(temperature)
    # Wrap the latest user turn in the OpenBuddy chat template.
    content = "<|role|>user<|says|>{}<|end|>\n<|role|>assistant<|says|>".format(messages[-1]["content"])
    # Submit the prompt, then block until the generated text is ready.
    uid = request_api_chatgpt(content, model, top_p, temperature)
    results = uuid_search_mp([uid])[0]
    # Validate and classify the output against the original prompt.
    check_problems(messages[0]["content"], results)
    return_type_list = return_type(messages[0]["content"], results)
    return jsonify({
        'code': 200,
        'id': uid["texts"]["id"],
        'object': 0,
        'created': 0,
        'model': model,
        'choices': [
            {
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': results,
                },
                'logprobs': None,
                'finish_reason': 'stop',
            }
        ],
        'return_type_list': return_type_list,
        'usage': 0,
        'system_fingerprint': 0,
    })
if __name__ == '__main__':
app.run(host="0.0.0.0", port=12004, threaded=True, debug=False)

9
main_mulu_model.py → main_openbuddy_llama_3_2_1b.py

@ -57,8 +57,8 @@ def get_host_ip():
return ip
chatgpt_url_predict = "http://{}:12000/predict".format(str(get_host_ip()))
chatgpt_url_search = "http://{}:12000/search".format(str(get_host_ip()))
chatgpt_url_predict = "http://{}:12010/predict".format(str(get_host_ip()))
chatgpt_url_search = "http://{}:12010/search".format(str(get_host_ip()))
def request_api_chatgpt(prompt):
@ -145,7 +145,8 @@ def handle_query():
# # {"role": "user", "content": prompt}
# {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
# ],
text = "User: " + messages[-1]["content"] + "\nAssistant:"
# text = "User: " + messages[-1]["content"] + "\nAssistant:"
text = "<|role|>user<|says|>{}<|end|>\n<|role|>assistant<|says|>".format(messages[-1]["content"])
uid = request_api_chatgpt(text)
# {
@ -183,5 +184,5 @@ def handle_query():
if __name__ == '__main__':
app.run(host="0.0.0.0", port=12005, threaded=True, debug=False)
app.run(host="0.0.0.0", port=12014, threaded=True, debug=False)

273
main_qwen.py

@ -0,0 +1,273 @@
#coding:utf-8
# 这是一个示例 Python 脚本。
# 按 Shift+F10 执行或将其替换为您的代码。
# 按 双击 Shift 在所有地方搜索类、文件、工具窗口、操作和设置。
# def request_api_chatgpt(api_key, prompt):
# print(api_key)
# print(prompt)
# OPENAI_API_KEY = api_key
# url = "https://api.openai.com/v1/chat/completions"
# # url = "https://one.aiskt.com"
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {OPENAI_API_KEY}"
# }
# data = {
# "model": "gpt-4-turbo-preview",
# "messages": [
# {"role": "user", "content": "你好"},
# {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
# # {"role": "user", "content": prompt}
# {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
# ],
# "top_p": 0.9,
# "temperature": 0.95
# }
# response = requests.post(url,
# headers=headers,
# data=json.dumps(data),
# timeout=1200)
#
# return response
from flask import Flask, jsonify
from flask import request
import requests
import time
import socket
import re
app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False
def get_host_ip():
    """Return the LAN IP address of this machine.

    Opens a UDP socket "towards" a public DNS server (connect() on a UDP
    socket sends no packet) and reads back the local address the OS
    selected for that route.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('8.8.8.8', 80))
        return probe.getsockname()[0]
    finally:
        probe.close()
chatgpt_url_predict = "http://{}:12001/predict".format(str(get_host_ip()))
chatgpt_url_search = "http://{}:12001/search".format(str(get_host_ip()))
def check_problems(input, output):
    """Log cases where the model emitted an artifact the prompt did not ask for.

    Only applies to "generate sub-heading content" tasks.  For each artifact
    kind (display formula, html table, mermaid diagram) that appears in
    ``output`` without the prompt requesting it, a section is appended to
    logs/error_xiaobiaoti.log.  Returns None.

    NOTE(review): ``input``/``output`` shadow builtins; names kept for
    caller compatibility.
    """
    pantten_formula = r'\\\[.*?\\\]'
    pantten_picture = r'<mermaidStart>.*?<mermaidEnd>'
    pantten_tb = r'<tbStart>.*?<tbEnd>'
    # Not a sub-heading generation task: nothing to validate.
    if "任务:生成论文小标题内容" not in input:
        return
    sections = []

    def _record(title):
        # Reproduce the original log section layout exactly.
        sections.append(title)
        sections.append("input:\n")
        sections.append(input)
        sections.append("output:\n")
        sections.append(output)
        sections.append("\n========================================================================\n")

    # Formula present although the prompt never asked for \[...\] blocks.
    if ("数学公式用\\[\\]进行包裹" not in input
            and re.findall(pantten_formula, output, re.DOTALL)):
        _record("多生成公式问题:\n")
    # Table present although the prompt never asked for <tbStart>...<tbEnd>.
    # (The original comment here said "formula" — it checks tables.)
    if ("表格部分开始必须用<tbStart>标识,表格部分结束必须用<tbEnd>标识,必须返回html格式的表格" not in input
            and re.findall(pantten_tb, output, re.DOTALL)):
        _record("多生成表格问题:\n")
    # Diagram present although the prompt never asked for an inline image.
    if ("图片要求在文字中插入一张图" not in input
            and re.findall(pantten_picture, output, re.DOTALL)):
        _record("多生成图片问题:\n")
    if sections:
        # Build the log entry once with join instead of repeated +=.
        with open("logs/error_xiaobiaoti.log", "a", encoding="utf-8") as f:
            f.write("".join(sections))
def return_type(input, output):
    """Classify extra artifacts present in ``output`` for a sub-heading task.

    Returns a list drawn from {"1": table, "2": formula, "3": diagram},
    in that order; empty for any other task or when nothing matched.
    """
    if "任务:生成论文小标题内容" not in input:
        return []
    checks = (
        (r'<tbStart>.*?<tbEnd>', "1"),            # html table block
        (r'\\\[.*?\\\]', "2"),                    # LaTeX display formula
        (r'<mermaidStart>.*?<mermaidEnd>', "3"),  # mermaid diagram
    )
    return [tag for pattern, tag in checks
            if re.findall(pattern, output, re.DOTALL)]
def request_api_chatgpt(content, model, top_p, temperature):
    """Submit a generation request to the local model server.

    Returns the decoded JSON payload on HTTP 200; otherwise logs the
    failure to stdout and returns an empty dict.
    """
    payload = {
        "content": content,
        "model": model,
        "top_p": top_p,
        "temperature": temperature,
    }
    response = requests.post(chatgpt_url_predict, json=payload, timeout=100000)
    if response.status_code != 200:
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}
    return response.json()
def uuid_search(uuid):
    """Look up a queued generation result by its task id.

    Returns the decoded JSON payload on HTTP 200; otherwise logs the
    failure to stdout and returns an empty dict.
    """
    response = requests.post(chatgpt_url_search, json={"id": uuid}, timeout=100000)
    if response.status_code != 200:
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}
    return response.json()
def uuid_search_mp(results):
    """Poll the /search endpoint until every submitted task has a result.

    Args:
        results: list of /predict responses, each shaped like
            {"texts": {"id": "<uuid>"}}.

    Returns:
        List of generated texts, in the same order as ``results``.
    """
    results_list = [""] * len(results)
    while not all(results_list):
        for i, submitted in enumerate(results):
            if results_list[i]:
                continue  # already fetched; don't poll this id again
            result = uuid_search(submitted["texts"]["id"])
            # uuid_search returns {} on transport errors; .get() keeps the
            # poll loop alive instead of raising KeyError (original bug).
            if result.get("code") == 200:
                results_list[i] = result["text"]
        time.sleep(3)
    return results_list
@app.route("/predict", methods=["POST"])
def handle_query():
    """Accept an OpenAI-style chat request, run it through the local
    Qwen server, and return an OpenAI-style completion payload.

    The response also carries ``return_type_list`` describing which extra
    artifacts (table / formula / diagram) were found in the generated text.
    """
    print(request.remote_addr)
    model = request.json.get("model")
    messages = request.json.get("messages")
    top_p = request.json.get("top_p")
    temperature = request.json.get("temperature")
    # Debug trace of the incoming request.  The original printed these
    # four values twice back-to-back; once is enough.
    print(model)
    print(messages)
    print(top_p)
    print(temperature)
    # Wrap the latest user turn in the Qwen (ChatML) chat template.
    content = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n".format(messages[-1]["content"])
    # Submit the prompt, then block until the generated text is ready.
    uid = request_api_chatgpt(content, model, top_p, temperature)
    results = uuid_search_mp([uid])[0]
    # Validate and classify the output against the original prompt.
    check_problems(messages[0]["content"], results)
    return_type_list = return_type(messages[0]["content"], results)
    return jsonify({
        'code': 200,
        'id': uid["texts"]["id"],
        'object': 0,
        'created': 0,
        'model': model,
        'choices': [
            {
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': results,
                },
                'logprobs': None,
                'finish_reason': 'stop',
            }
        ],
        'return_type_list': return_type_list,
        'usage': 0,
        'system_fingerprint': 0,
    })
if __name__ == '__main__':
app.run(host="0.0.0.0", port=12004, threaded=True, debug=False)

1
run_api_gunicorn_mulu_openbuddyl.sh

@ -0,0 +1 @@
# Launch the mulu OpenBuddy API under gunicorn.
# Fix: gunicorn takes MODULE:VARIABLE, not a .py filename (the sibling
# run scripts all use the module form).
gunicorn main_mulu_openbuddy:app -c gunicorn_config_mulu_openbuddy.py

1
run_api_gunicorn_openbuddy.sh

@ -0,0 +1 @@
# Launch the OpenBuddy chat API under gunicorn with its config file.
gunicorn main_openbuddy:app -c gunicorn_config_openbuddy.py

1
run_api_gunicorn_openbuddy_llama_3_2.sh

@ -0,0 +1 @@
# Launch the OpenBuddy Llama-3.2-1B chat API under gunicorn with its config file.
gunicorn main_openbuddy_llama_3_2_1b:app -c gunicorn_config_openbuddy_model_llama_3_2.py

1
run_api_gunicorn_qwen.sh

@ -0,0 +1 @@
# Launch the Qwen chat API under gunicorn with its config file.
gunicorn main_qwen:app -c gunicorn_config_qwen.py

42
测试公式提取.py

@ -0,0 +1,42 @@
# coding:utf-8
# Ad-hoc script: verify that the display-formula regex extracts \[...\] blocks
# from generated paper text.
import json
import re
# Matches a LaTeX display formula delimited by literal \[ ... \] (non-greedy).
pan = r'\\\[.*?\\\]'
# sentence = "2.4.1 时域有限差分原理\n\n时域有限差分(FDTD)方法是一种计算电磁波传播和散射问题的数值分析技术。该方法基于Maxwell方程在时域内的直接求解,通过离散化时间和空间来模拟电磁场的行为。FDTD方法的核心在于将连续的电磁场方程转换为可以在计算机上求解的离散形式。\n\nMaxwell方程的基本形式包括:\n\\[\n\\nabla \\times \\mathbf{E} = -\\frac{\\partial \\mathbf{B}}{\\partial t}\n\\]\n\\[\n\\nabla \\times \\mathbf{H} = \\frac{\\partial \\mathbf{D}}{\\partial t} + \\mathbf{J}\n\\]\n其中,\\(\\mathbf{E}\\) 和 \\(\\mathbf{H}\\) 分别是电场和磁场,\\(\\mathbf{D}\\) 和 \\(\\mathbf{B}\\) 是电位移场和磁感应场,\\(\\mathbf{J}\\) 是电流密度。\n\n在FDTD方法中,空间被划分为一个由Yee格子组成的网格,每个网格点上的电场和磁场分量都被独立更新。时间也被离散化,更新算法交替在电场和磁场之间执行,形成一个可以迭代的计算过程。\n\n更新电场和磁场的离散方程如下:\n\\[\nE_x^{n+1}(i, j, k) = E_x^n(i, j, k) + \\frac{\\Delta t}{\\epsilon} \\left( \\frac{H_z^{n+0.5}(i, j+1, k) - H_z^{n+0.5}(i, j, k)}{\\Delta y} - \\frac{H_y^{n+0.5}(i, j, k+1) - H_y^{n+0.5}(i, j, k)}{\\Delta z} \\right)\n\\]\n\\[\nH_z^{n+0.5}(i, j, k) = H_z^n(i, j, k) - \\frac{\\Delta t}{\\mu} \\left( \\frac{E_x^{n+1}(i, j, k) - E_x^{n+1}(i, j-1, k)}{\\Delta y} - \\frac{E_y^{n+1}(i, j, k) - E_y^{n+1}(i, j, k-1)}{\\Delta x} \\right)\n\\]\n\n<mermaidStart>\nstateDiagram-v2\n [*] --> MaxwellEquations: Maxwell方程\n MaxwellEquations --> YeeGrid: Yee格子划分\n YeeGrid --> UpdateE: 更新电场E\n UpdateE --> UpdateH: 更新磁场H\n UpdateH --> CheckConvergence: 检查收敛性\n CheckConvergence --> [*]\n CheckConvergence --> UpdateE\n<mermaidEnd>\n\n通过上述迭代过程,FDTD方法能够模拟复杂介质中电磁波的传播和相互作用,广泛应用于光学、天线设计、微波工程等领域。该方法的优点在于其直观性和相对简单的实现方式,但同时也存在时间步长和空间步长受稳定性条件限制的缺点。"
sentence = "4.3 德国破产管理人选任和责任追究制度的改革\n\n德国在破产管理人选任和责任追究方面的制度改革,体现了对破产程序透明度和效率的重视。该国通过立法明确了破产管理人的职责范围、选任标准以及违规行为的法律后果,从而提高了破产管理人的专业性和责任感。\n\n在选任方面,德国规定破产管理人必须具备特定的资格和经验。这包括但不限于财务管理、法律知识以及破产程序的理解。此外,破产管理人的选任还需经过相关机构的严格审核,确保其具备履行职责的能力。具体的选任流程可以用以下公式表示:\n\n\\[\n选任流程 = \\frac{申请者资格审核}{专业能力评估} \\times \\frac{背景调查}{最终确认}\n\\]\n\n在责任追究方面,德国法律对破产管理人的行为设定了明确的法律责任。一旦破产管理人未能恰当履行职责,或因疏忽导致破产程序的延误或其他负面影响,将面临民事赔偿责任甚至刑事责任。此外,破产管理人还需向破产清算委员会报告其工作进展和遇到的问题,接受监督和评估。\n\n德国的破产管理人选任和责任追究制度改革,有效提升了破产管理人的专业水平和工作效率,同时也增强了破产程序的公正性和透明度。以下是一个简单的代码示例,展示了如何使用Python来模拟破产管理人选任的过程:\n\n```python\ndef select_bankruptcy_manager(qualifications, experience, legal_knowledge, bankruptcy_program_understanding):\n # 定义选任标准\n qualification_threshold = 0.6\n experience_threshold = 0.5\n legal_knowledge_threshold = 0.7\n bankruptcy_program_understanding_threshold = 0.8\n\n # 计算申请者符合标准的程度\n qualifications_score = max(qualifications, 1) if qualifications >= qualification_threshold else 0\n experience_score = max(experience, 1) if experience >= experience_threshold else 0\n legal_knowledge_score = max(legal_knowledge, 1) if legal_knowledge >= legal_knowledge_threshold else 0\n bankruptcy_program_understanding_score = max(bankruptcy_program_understanding, 1) if bankruptcy_program_understanding >= bankruptcy_program_understanding_threshold else 0\n\n # 综合评分\n total_score = sum([qualifications_score, experience_score, legal_knowledge_score, bankruptcy_program_understanding_score])\n\n # 根据总分决定是否选任\n if total_score >= 4:\n return \"合格\"\n else:\n return \"不合格\"\n\n# 示例应用\napplicant_qualifications = 0.75\napplicant_experience = 0.6\napplicant_legal_knowledge = 0.9\napplicant_bankruptcy_program_understanding = 0.85\nresult = select_bankruptcy_manager(applicant_qualifications, applicant_experience, applicant_legal_knowledge, applicant_bankruptcy_program_understanding)\nprint(result)\n```\n\n通过上述代码,我们可以模拟破产管理人的选任过程,根据申请者的不同资质和经验,计算出其是否符合选任标准。这种方法有助于提高破产管理人选任的科学性和公正性。"
print(re.findall(pan, sentence, re.DOTALL))
# sentence_new = sentence.replace("\\[", "<formulaStart>").replace("\\]", "<formulaEnd>")
# print(sentence_new)
b = '''背景:我是一名博士生,我想写一篇论文。
要求根据论文题目论破产管理人的规制目录是绪论
1.1 研究背景与研究意义
1.2 研究综述
1.3 研究方法与研究思路
破产管理人的规制的理论依据
2.1 破产管理人的特殊性
2.2 破产程序法定主义
2.3 破产管理人的独立性原则
破产管理人监管的现状及问题分析
3.1 监管主体
3.2 监管方式
3.3 监管内容
3.4 监管结果
国外破产管理人规制经验借鉴
4.1 美国破产管理人监督体系的构建
4.2 英国破产管理人和破产清算委员会制度的设立
4.3 德国破产管理人选任和责任追究制度的改革
我国破产管理人监管机制的改进建议
5.1 构建多元主体监督制度
5.2 建立专业化破产管理人队伍
5.3 完善破产管理人入选制度
5.4 制度完善后的保障措施
结论
6.1 研究结论
6.2 研究不足为小标题4.3 德国破产管理人选任和责任追究制度的改革填充1000字左右的中文内容内容第一行返回4.3 德国破产管理人选任和责任追究制度的改革且不包含目录中其他标题禁止出现首先其次等字样必要时候应根据条数罗列这段内容中必须包含公式代码其中数学公式用\[\]进行包裹代码要求在文中合理位置插入一段代码代码要求质量要高'''
print(json.dumps(b,ensure_ascii=False))
Loading…
Cancel
Save