
Initial commit

Branch: master
Commit 1c70de6788 by majiahui@haimaqingfan.com, 8 months ago
  1. gunicorn_config.py (21 lines)
  2. gunicorn_config_kto_model.py (21 lines)
  3. gunicorn_config_mulu_model.py (21 lines)
  4. main.py (187 lines)
  5. main_kto_model.py (187 lines)
  6. main_mulu_model.py (187 lines)
  7. run_api_gunicorn.sh (1 line)
  8. run_api_gunicorn_kto_model.sh (1 line)
  9. run_api_gunicorn_mulu_model.sh (1 line)
  10. 接口说明.md (39 lines)

21
gunicorn_config.py

@@ -0,0 +1,21 @@
# Number of parallel worker processes
workers = 16
# Address and port to listen on (change as needed)
bind = '0.0.0.0:12004'
loglevel = 'debug'
worker_class = "gevent"
# Run as a daemon (the service keeps running after the terminal session is closed)
daemon = True
# Worker timeout in seconds (gunicorn's default is 30); adjust to your needs
timeout = 300
# Paths for the access log and the error log
accesslog = './logs/acess.log'
errorlog = './logs/error.log'
# access_log_format = '%(h)s - %(t)s - %(u)s - %(s)s %(H)s'
# errorlog = '-'  # log to stdout instead
# Maximum number of simultaneous connections per worker
worker_connections = 20000
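
A side note on the `workers` setting: if the pool size should track the host instead of being hard-coded (16 here, 8 and 64 in the two configs below), a common gunicorn convention, shown only as a hedged sketch and not part of this commit, is to derive it from the CPU count:

```
# Hypothetical alternative (not in this commit): size the worker pool from
# the number of CPU cores instead of hard-coding it.
import multiprocessing

workers = multiprocessing.cpu_count() * 2 + 1
```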

21
gunicorn_config_kto_model.py

@@ -0,0 +1,21 @@
# Number of parallel worker processes
workers = 8
# Address and port to listen on (change as needed)
bind = '0.0.0.0:12006'
loglevel = 'debug'
worker_class = "gevent"
# Run as a daemon (the service keeps running after the terminal session is closed)
daemon = True
# Worker timeout in seconds (gunicorn's default is 30); adjust to your needs
timeout = 120
# Paths for the access log and the error log
accesslog = './logs/acess_kto_model.log'
errorlog = './logs/error_kto_model.log'
# access_log_format = '%(h)s - %(t)s - %(u)s - %(s)s %(H)s'
# errorlog = '-'  # log to stdout instead
# Maximum number of simultaneous connections per worker
worker_connections = 20000

21
gunicorn_config_mulu_model.py

@@ -0,0 +1,21 @@
# Number of parallel worker processes
workers = 64
# Address and port to listen on (change as needed)
bind = '0.0.0.0:12005'
loglevel = 'debug'
worker_class = "gevent"
# Run as a daemon (the service keeps running after the terminal session is closed)
daemon = True
# Worker timeout in seconds (gunicorn's default is 30); adjust to your needs
timeout = 120
# Paths for the access log and the error log
accesslog = './logs/acess_mulu_model.log'
errorlog = './logs/error_mulu_model.log'
# access_log_format = '%(h)s - %(t)s - %(u)s - %(s)s %(H)s'
# errorlog = '-'  # log to stdout instead
# Maximum number of simultaneous connections per worker
worker_connections = 20000

187
main.py

@@ -0,0 +1,187 @@
#coding:utf-8
# This is a sample Python script.
# Press Shift+F10 to run it, or replace it with your own code.
# Double-press Shift to search everywhere for classes, files, tool windows, actions, and settings.
# def request_api_chatgpt(api_key, prompt):
#     print(api_key)
#     print(prompt)
#     OPENAI_API_KEY = api_key
#     url = "https://api.openai.com/v1/chat/completions"
#     # url = "https://one.aiskt.com"
#     headers = {
#         "Content-Type": "application/json",
#         "Authorization": f"Bearer {OPENAI_API_KEY}"
#     }
#     data = {
#         "model": "gpt-4-turbo-preview",
#         "messages": [
#             {"role": "user", "content": "你好"},
#             {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
#             # {"role": "user", "content": prompt}
#             {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
#         ],
#         "top_p": 0.9,
#         "temperature": 0.95
#     }
#     response = requests.post(url,
#                              headers=headers,
#                              data=json.dumps(data),
#                              timeout=1200)
#
#     return response

from flask import Flask, jsonify
from flask import request
import requests
import time
import socket

app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False


def get_host_ip():
    """
    Look up the local machine's IP address.
    :return: ip
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip


chatgpt_url_predict = "http://{}:12001/predict".format(str(get_host_ip()))
chatgpt_url_search = "http://{}:12001/search".format(str(get_host_ip()))


def request_api_chatgpt(prompt):
    # Submit the prompt to the upstream model service; on success it returns
    # a JSON body containing a task id.
    data = {
        "texts": prompt
    }
    response = requests.post(
        chatgpt_url_predict,
        json=data,
        timeout=100000
    )
    if response.status_code == 200:
        return response.json()
    else:
        # logger.error(
        #     "【{}】 Failed to get a proper response from remote "
        #     "server. Status Code: {}. Response: {}"
        #     "".format(url, response.status_code, response.text)
        # )
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}


def uuid_search(uuid):
    # Ask the upstream service for the result of a previously submitted task.
    data = {
        "id": uuid
    }
    response = requests.post(
        chatgpt_url_search,
        json=data,
        timeout=100000
    )
    if response.status_code == 200:
        return response.json()
    else:
        # logger.error(
        #     "【{}】 Failed to get a proper response from remote "
        #     "server. Status Code: {}. Response: {}"
        #     "".format(url, response.status_code, response.text)
        # )
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}


def uuid_search_mp(results):
    # Poll the upstream service every 3 seconds until each submitted task
    # has produced a result.
    results_list = [""] * len(results)
    while True:
        tiaochu_bool = True
        for i in results_list:
            if i == "":
                tiaochu_bool = False
                break
        if tiaochu_bool:
            break
        for i in range(len(results)):
            uuid = results[i]["texts"]["id"]
            result = uuid_search(uuid)
            if result.get("code") == 200:
                results_list[i] = result["text"]
        time.sleep(3)
    return results_list

@app.route("/predict", methods=["POST"])
def handle_query():
    print(request.remote_addr)
    model = request.json.get("model")
    messages = request.json.get("messages")
    top_p = request.json.get("top_p")
    temperature = request.json.get("temperature")
    # Expected request shape, e.g.:
    # "messages": [
    #     {"role": "user", "content": "你好"},
    #     {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
    #     # {"role": "user", "content": prompt}
    #     {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
    # ],
    text = "User: " + messages[-1]["content"] + "\nAssistant:"
    uid = request_api_chatgpt(text)
    # Example upstream response:
    # {
    #     "probabilities": null,
    #     "status_code": 200,
    #     "texts": {
    #         "id": "29379d06-d08b-11ee-b56d-31fe0a8adccc"
    #     }
    # }
    results = uuid_search_mp([uid])[0]
    # Wrap the generated text in an OpenAI-style chat-completion envelope.
    return_text = {
        'code': 200,
        'id': uid["texts"]["id"],
        'object': 0,
        'created': 0,
        'model': 0,
        'choices': [
            {
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': results
                },
                'logprobs': None,
                'finish_reason': 'stop'
            }
        ],
        'usage': 0,
        'system_fingerprint': 0
    }
    return jsonify(return_text)


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=12004, threaded=True, debug=False)
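
As a quick smoke test of the /predict route above, a minimal client sketch, assuming the service is already running locally under gunicorn_config.py (port 12004) and using a placeholder prompt, could look like this:

```
# Minimal client sketch for the /predict endpoint defined above.
# Assumes the service is reachable at 127.0.0.1:12004 (see gunicorn_config.py).
import requests

resp = requests.post(
    "http://127.0.0.1:12004/predict",
    json={
        "model": "gpt-4-turbo-preview",
        "messages": [{"role": "user", "content": "你好"}],
        "top_p": 0.9,
        "temperature": 0.95,
    },
    timeout=1200,
)
# The wrapper returns an OpenAI-style envelope; the generated text sits in
# choices[0].message.content.
print(resp.json()["choices"][0]["message"]["content"])
```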

187
main_kto_model.py

@@ -0,0 +1,187 @@
#coding:utf-8
# This is a sample Python script.
# Press Shift+F10 to run it, or replace it with your own code.
# Double-press Shift to search everywhere for classes, files, tool windows, actions, and settings.
# def request_api_chatgpt(api_key, prompt):
#     print(api_key)
#     print(prompt)
#     OPENAI_API_KEY = api_key
#     url = "https://api.openai.com/v1/chat/completions"
#     # url = "https://one.aiskt.com"
#     headers = {
#         "Content-Type": "application/json",
#         "Authorization": f"Bearer {OPENAI_API_KEY}"
#     }
#     data = {
#         "model": "gpt-4-turbo-preview",
#         "messages": [
#             {"role": "user", "content": "你好"},
#             {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
#             # {"role": "user", "content": prompt}
#             {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
#         ],
#         "top_p": 0.9,
#         "temperature": 0.95
#     }
#     response = requests.post(url,
#                              headers=headers,
#                              data=json.dumps(data),
#                              timeout=1200)
#
#     return response

from flask import Flask, jsonify
from flask import request
import requests
import time
import socket

app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False


def get_host_ip():
    """
    Look up the local machine's IP address.
    :return: ip
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip


chatgpt_url_predict = "http://{}:12002/predict".format(str(get_host_ip()))
chatgpt_url_search = "http://{}:12002/search".format(str(get_host_ip()))


def request_api_chatgpt(prompt):
    # Submit the prompt to the upstream model service; on success it returns
    # a JSON body containing a task id.
    data = {
        "texts": prompt
    }
    response = requests.post(
        chatgpt_url_predict,
        json=data,
        timeout=100000
    )
    if response.status_code == 200:
        return response.json()
    else:
        # logger.error(
        #     "【{}】 Failed to get a proper response from remote "
        #     "server. Status Code: {}. Response: {}"
        #     "".format(url, response.status_code, response.text)
        # )
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}


def uuid_search(uuid):
    # Ask the upstream service for the result of a previously submitted task.
    data = {
        "id": uuid
    }
    response = requests.post(
        chatgpt_url_search,
        json=data,
        timeout=100000
    )
    if response.status_code == 200:
        return response.json()
    else:
        # logger.error(
        #     "【{}】 Failed to get a proper response from remote "
        #     "server. Status Code: {}. Response: {}"
        #     "".format(url, response.status_code, response.text)
        # )
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}


def uuid_search_mp(results):
    # Poll the upstream service every 3 seconds until each submitted task
    # has produced a result.
    results_list = [""] * len(results)
    while True:
        tiaochu_bool = True
        for i in results_list:
            if i == "":
                tiaochu_bool = False
                break
        if tiaochu_bool:
            break
        for i in range(len(results)):
            uuid = results[i]["texts"]["id"]
            result = uuid_search(uuid)
            if result.get("code") == 200:
                results_list[i] = result["text"]
        time.sleep(3)
    return results_list

@app.route("/predict", methods=["POST"])
def handle_query():
    print(request.remote_addr)
    model = request.json.get("model")
    messages = request.json.get("messages")
    top_p = request.json.get("top_p")
    temperature = request.json.get("temperature")
    # Expected request shape, e.g.:
    # "messages": [
    #     {"role": "user", "content": "你好"},
    #     {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
    #     # {"role": "user", "content": prompt}
    #     {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
    # ],
    text = "User: " + messages[-1]["content"] + "\nAssistant:"
    uid = request_api_chatgpt(text)
    # Example upstream response:
    # {
    #     "probabilities": null,
    #     "status_code": 200,
    #     "texts": {
    #         "id": "29379d06-d08b-11ee-b56d-31fe0a8adccc"
    #     }
    # }
    results = uuid_search_mp([uid])[0]
    # Wrap the generated text in an OpenAI-style chat-completion envelope.
    return_text = {
        'code': 200,
        'id': uid["texts"]["id"],
        'object': 0,
        'created': 0,
        'model': 0,
        'choices': [
            {
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': results
                },
                'logprobs': None,
                'finish_reason': 'stop'
            }
        ],
        'usage': 0,
        'system_fingerprint': 0
    }
    return jsonify(return_text)


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=12006, threaded=True, debug=False)

187
main_mulu_model.py

@@ -0,0 +1,187 @@
#coding:utf-8
# This is a sample Python script.
# Press Shift+F10 to run it, or replace it with your own code.
# Double-press Shift to search everywhere for classes, files, tool windows, actions, and settings.
# def request_api_chatgpt(api_key, prompt):
#     print(api_key)
#     print(prompt)
#     OPENAI_API_KEY = api_key
#     url = "https://api.openai.com/v1/chat/completions"
#     # url = "https://one.aiskt.com"
#     headers = {
#         "Content-Type": "application/json",
#         "Authorization": f"Bearer {OPENAI_API_KEY}"
#     }
#     data = {
#         "model": "gpt-4-turbo-preview",
#         "messages": [
#             {"role": "user", "content": "你好"},
#             {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
#             # {"role": "user", "content": prompt}
#             {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
#         ],
#         "top_p": 0.9,
#         "temperature": 0.95
#     }
#     response = requests.post(url,
#                              headers=headers,
#                              data=json.dumps(data),
#                              timeout=1200)
#
#     return response

from flask import Flask, jsonify
from flask import request
import requests
import time
import socket

app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False


def get_host_ip():
    """
    Look up the local machine's IP address.
    :return: ip
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip


chatgpt_url_predict = "http://{}:12000/predict".format(str(get_host_ip()))
chatgpt_url_search = "http://{}:12000/search".format(str(get_host_ip()))


def request_api_chatgpt(prompt):
    # Submit the prompt to the upstream model service; on success it returns
    # a JSON body containing a task id.
    data = {
        "texts": prompt
    }
    response = requests.post(
        chatgpt_url_predict,
        json=data,
        timeout=100000
    )
    if response.status_code == 200:
        return response.json()
    else:
        # logger.error(
        #     "【{}】 Failed to get a proper response from remote "
        #     "server. Status Code: {}. Response: {}"
        #     "".format(url, response.status_code, response.text)
        # )
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}


def uuid_search(uuid):
    # Ask the upstream service for the result of a previously submitted task.
    data = {
        "id": uuid
    }
    response = requests.post(
        chatgpt_url_search,
        json=data,
        timeout=100000
    )
    if response.status_code == 200:
        return response.json()
    else:
        # logger.error(
        #     "【{}】 Failed to get a proper response from remote "
        #     "server. Status Code: {}. Response: {}"
        #     "".format(url, response.status_code, response.text)
        # )
        print("Failed to get a proper response from remote "
              "server. Status Code: {}. Response: {}"
              "".format(response.status_code, response.text))
        return {}


def uuid_search_mp(results):
    # Poll the upstream service every 3 seconds until each submitted task
    # has produced a result.
    results_list = [""] * len(results)
    while True:
        tiaochu_bool = True
        for i in results_list:
            if i == "":
                tiaochu_bool = False
                break
        if tiaochu_bool:
            break
        for i in range(len(results)):
            uuid = results[i]["texts"]["id"]
            result = uuid_search(uuid)
            if result.get("code") == 200:
                results_list[i] = result["text"]
        time.sleep(3)
    return results_list

@app.route("/predict", methods=["POST"])
def handle_query():
    print(request.remote_addr)
    model = request.json.get("model")
    messages = request.json.get("messages")
    top_p = request.json.get("top_p")
    temperature = request.json.get("temperature")
    # Expected request shape, e.g.:
    # "messages": [
    #     {"role": "user", "content": "你好"},
    #     {"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
    #     # {"role": "user", "content": prompt}
    #     {"role": "user", "content": "一张信用卡为多个gpt4账号付费会风控吗"}
    # ],
    text = "User: " + messages[-1]["content"] + "\nAssistant:"
    uid = request_api_chatgpt(text)
    # Example upstream response:
    # {
    #     "probabilities": null,
    #     "status_code": 200,
    #     "texts": {
    #         "id": "29379d06-d08b-11ee-b56d-31fe0a8adccc"
    #     }
    # }
    results = uuid_search_mp([uid])[0]
    # Wrap the generated text in an OpenAI-style chat-completion envelope.
    return_text = {
        'code': 200,
        'id': uid["texts"]["id"],
        'object': 0,
        'created': 0,
        'model': 0,
        'choices': [
            {
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': results
                },
                'logprobs': None,
                'finish_reason': 'stop'
            }
        ],
        'usage': 0,
        'system_fingerprint': 0
    }
    return jsonify(return_text)


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=12005, threaded=True, debug=False)

1
run_api_gunicorn.sh

@@ -0,0 +1 @@
gunicorn main:app -c gunicorn_config.py

1
run_api_gunicorn_kto_model.sh

@@ -0,0 +1 @@
gunicorn main_kto_model:app -c gunicorn_config_kto_model.py

1
run_api_gunicorn_mulu_model.sh

@@ -0,0 +1 @@
gunicorn main_mulu_model:app -c gunicorn_config_mulu_model.py

39
接口说明.md

@@ -0,0 +1,39 @@
# GPT drop-in replacement API
Request example:
```
requests.post("http://101.37.83.210:12004",
headers=headers,
data=json.dumps({
"model": "gpt-4-turbo-preview",
"messages": [
{"role": "user", "content": "你好"},
{"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
{"role": "user", "content": prompt}
],
"top_p": 0.9,
"temperature": 0.95,
}),
timeout=1200)
```
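Based on the return_text structure built in main.py, a successful reply has an OpenAI-like shape; a sketch of the response body (the id and content values below are placeholders):
```
{
    "code": 200,
    "id": "<task uuid>",
    "object": 0,
    "created": 0,
    "model": 0,
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": "<generated text>"},
            "logprobs": null,
            "finish_reason": "stop"
        }
    ],
    "usage": 0,
    "system_fingerprint": 0
}
```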
# GPT paper-generation API
Request example:
```
requests.post("http://101.37.83.210:12005",
headers=headers,
data=json.dumps({
"model": "gpt-4-turbo-preview",
"messages": [
{"role": "user", "content": "你好"},
{"role": "assistant", "content": "你好!有什么我可以帮助你的吗?"},
{"role": "user", "content": prompt}
],
"top_p": 0.9,
"temperature": 0.95,
"table_and_formulas": "1,2"
}),
timeout=1200)
```