# coding:utf8
# The HTTP entry point (chatGptNew) pushes tasks onto an in-memory queue; the
# worker loop (chatgpt) pulls them off, calls the OpenAI chat completion API,
# and forwards the result to Kafka.
import os, sys
import io

# Force UTF-8 stdout so non-ASCII text in logs and prints is not mangled
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')

# Make the current and parent directories importable
cur_dir = os.path.dirname(os.path.abspath(__file__)) or os.getcwd()
par_dir = os.path.abspath(os.path.join(cur_dir, os.path.pardir))
sys.path.append(cur_dir)
sys.path.append(par_dir)

import json
import traceback
import queue
import requests
import uuid
import time

from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt

from text_analysis.tools import to_kafka
from text_analysis.tools.tool import get_content
from log_util.set_logger import set_logger

logging = set_logger('logs/results.log')

# Queue shared between the HTTP view and the background worker loop
task_queue = queue.Queue()

@csrf_exempt
def chatGptNew(request):
    """Accept a task as a JSON POST body, enqueue it, and acknowledge."""
    if request.method == 'POST':
        try:
            raw_data = json.loads(request.body)
            task_queue.put(raw_data)
            return HttpResponse(json.dumps({"code": 1, "msg": "Request accepted!"}, ensure_ascii=False))
        except Exception:
            logging.error(traceback.format_exc())
            return HttpResponse(json.dumps({"code": 0, "msg": "Request body is not valid JSON!"}, ensure_ascii=False))
    else:
        return HttpResponse(json.dumps({"code": 0, "msg": "Wrong request method, please use POST."}, ensure_ascii=False))
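
# Usage sketch (assumptions): the URL route for this view is not defined in
# this file, and the exact task schema is dictated by the upstream producer
# and by text_analysis.tools.tool.get_content. Assuming the view is mounted at
# /chatGptNew/ and that get_content accepts the flat fields used below, a
# client call could look roughly like:
#
#   import requests
#   task = {
#       "output": ["id", "content"],           # keys echoed back in the result
#       "authorization": "<OpenAI API key>",   # forwarded as the Bearer token
#       "model": "gpt-3.5-turbo",
#       "prompt": "Hello",
#       "temperature": 0.7,
#       "top_p": 1,
#       "n": 1,
#   }
#   requests.post("http://localhost:8000/chatGptNew/", json=task)
#
# The view only enqueues the task; the GPT call itself happens asynchronously
# in the chatgpt() worker loop below.
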
def chatgpt():
    """Worker loop: pull tasks off the queue, call the OpenAI API, push results to Kafka."""
    while True:
        if task_queue.qsize() > 0:
            logging.info("Fetching task, queue length {}".format(task_queue.qsize()))
            raw_data = task_queue.get()
            # Defined up front so the error path can serialize it even if the
            # failure happens before the "output" field has been read
            res_tmp = {}
            try:
                output = raw_data["output"]
                # Pre-fill the requested output keys with empty strings
                res_tmp = {key: "" for key in output}
                if "id" in res_tmp.keys():
                    res_tmp["id"] = str(uuid.uuid4())
                data = get_content(raw_data, logging)
                url = "https://api.openai.com/v1/chat/completions"
                headers = {
                    "Content-Type": "application/json;charset=UTF-8",
                    "Authorization": "Bearer " + data["authorization"]
                }
                payload = json.dumps({
                    "model": data["model"],
                    "messages": [{"role": "user", "content": data["prompt"]}],
                    "temperature": float(data["temperature"]),
                    "top_p": float(data["top_p"]),
                    "n": int(data["n"])
                })
                logging.info("Prompt: {}".format(data["prompt"]))
                response = requests.request("POST", url, headers=headers, data=payload)
                logging.info("GPT response: {}-{}".format(response, response.text))
                d = json.loads(response.text)
                result = d['choices'][0]['message']['content']
                res_tmp["content"] = result
                res_tmp_json = json.dumps(res_tmp, ensure_ascii=False)
                raw_data["result"] = {"successCode": "1", "errorLog": "", "results": res_tmp_json}
                to_kafka.send_kafka(raw_data, logging)
            except Exception:
                # On any failure, report the traceback and whatever partial result exists
                raw_data["result"] = {"successCode": "0", "errorLog": "", "results": ""}
                raw_data["result"]["errorLog"] = traceback.format_exc()
                res_tmp_json = json.dumps(res_tmp, ensure_ascii=False)
                raw_data["result"]["results"] = res_tmp_json
                logging.info("GPT call failed {}-{}".format(raw_data, traceback.format_exc()))
                to_kafka.send_kafka(raw_data, logging)
            # Commented-out alternative error handler: report the raw API error
            # body when available instead of the traceback.
            # except Exception:
            #     raw_data["result"] = {"successCode": "0", "errorLog": "", "results": ""}
            #     if response and response.text:
            #         raw_data["result"]["errorLog"] = response.text
            #     else:
            #         raw_data["result"]["errorLog"] = traceback.format_exc()
            #     res_tmp_json = json.dumps(res_tmp, ensure_ascii=False)
            #     raw_data["result"]["results"] = res_tmp_json
            #     logging.info("Parsing failed {}-{}".format(raw_data, traceback.format_exc()))
            #     to_kafka.send_kafka(raw_data, logging)
        else:
            # No pending tasks: sleep before polling again
            time.sleep(10)
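
# How the worker loop gets started is not shown in this file. A minimal sketch
# (an assumption about the wiring, not necessarily how this project does it)
# would be to run it in a daemon thread next to the Django app, e.g. from the
# app's startup code:
#
#   import threading
#   threading.Thread(target=chatgpt, daemon=True).start()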