Qwen open-source large model

# coding:utf8
import os, sys
import io
# Make sure stdout can emit UTF-8 (log/console output contains Chinese text)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
cur_dir = os.path.dirname(os.path.abspath(__file__)) or os.getcwd()
par_dir = os.path.abspath(os.path.join(cur_dir, os.path.pardir))
sys.path.append(cur_dir)
sys.path.append(par_dir)
import json
from django.http import HttpResponse
from text_analysis.tools import to_kafka
from django.views.decorators.csrf import csrf_exempt
from log_util.set_logger import set_logger
from openai import OpenAI
logging = set_logger('logs/results.log')
import traceback
import queue
import requests
from text_analysis.tools.tool import get_content, parse_gptResult
import uuid
import time
from kazoo.client import KazooClient
from kazoo.protocol.states import EventType

# openai_api_key = "EMPTY"
# openai_api_base = "http://10.0.32.225:9000/v1"
# client = OpenAI(api_key=openai_api_key, base_url=openai_api_base)

# In-memory queue: filled by the QwenModel view, drained by the Qwen() worker loop
task_queue = queue.Queue()
# scenes_id -> {"version": ..., "operation": ...}, updated from ZooKeeper; used to skip paused/stale tasks
stop_dict = {}


@csrf_exempt
def QwenModel(request):
    # HTTP entry point: accept a JSON task via POST and enqueue it for the worker loop
    if request.method == 'POST':
        try:
            # txt = request.body.encode("utf-8")
            raw_data = json.loads(request.body)
            task_queue.put(raw_data)
            return HttpResponse(json.dumps({"code": 1, "msg": "请求正常!"}, ensure_ascii=False))
        except:
            logging.error(traceback.format_exc())
            return HttpResponse(json.dumps({"code": 0, "msg": "请求json格式不正确!"}, ensure_ascii=False))
    else:
        return HttpResponse(json.dumps({"code": 0, "msg": "请求方式错误,改为post请求"}, ensure_ascii=False))
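
# Illustrative only: a task posted to QwenModel is expected to look roughly like the
# commented example below. The exact schema is defined by the callers and by
# get_content() in text_analysis.tools.tool; the field values here are assumptions
# inferred from how Qwen() reads raw_data, and the URL path is hypothetical.
#
# example_task = {
#     "scenes_id": "1001",            # scene/task id, matched against stop_dict
#     "version": "v1",                # task version; stale versions are filtered out
#     "output": ["id", "content"],    # output fields used to build the result skeleton
#     "input": {"fieldType": 0},      # 0 = plain-text result, 1 = JSON result
#     # ...plus the prompt/temperature/top_p/n inputs that get_content() assembles
# }
# requests.post("http://127.0.0.1:8000/QwenModel", json=example_task)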


def Qwen():
    # Worker loop: pull a task from the queue, call the Qwen chat-completions API,
    # package the result and push it to Kafka
    while True:
        if task_queue.qsize() > 0:
            try:
                logging.info("取任务队列长度{}".format(task_queue.qsize()))
                raw_data = task_queue.get()
                # Initialise the result skeleton first so the except-branch below can always serialise it
                res_tmp = {}
                output = raw_data["output"]
                res_tmp = {key: "" for key in output}
                if "id" in res_tmp.keys():
                    res_tmp["id"] = str(uuid.uuid4())
                task_id = raw_data["scenes_id"]
                task_version = raw_data["version"]
                logging.info("任务数据为:{}".format(raw_data))
                logging.info("当前version信息为:{}".format(stop_dict))
                # Skip tasks whose version no longer matches the latest one pushed via ZooKeeper (paused tasks)
                if task_id in stop_dict.keys() and task_version != stop_dict[task_id]["version"]:
                    logging.info("已暂停任务,过滤掉。{}".format(raw_data))
                    continue
                data = get_content(raw_data, logging)
                # logging.info("请求信息为{},解析后模型请求为{}".format(raw_data, data))
                url = "http://10.0.32.225:9000/v1/chat/completions"
                headers = {
                    "Content-Type": "application/json;charset=UTF-8"
                }
                payload = json.dumps({
                    "model": "Qwen2-72B-Instruct-GPTQ-Int4",
                    "messages": [{"role": "user", "content": data["prompt"]}],
                    "temperature": float(data["temperature"]),
                    "top_p": float(data["top_p"]),
                    "n": int(data["n"])
                })
                # logging.info("prompt为{}".format(data["prompt"]))
                response = requests.request("POST", url, headers=headers, data=payload, timeout=180)
                logging.info("Prompt为:{}—Qwen返回值:{}-{}".format(data["prompt"], response, response.text))
                d = json.loads(response.text)
                result = d['choices'][0]['message']['content']
                # response = client.chat.completions.create(
                #     model="Qwen2-72B-Instruct-GPTQ-Int4",
                #     messages=[{"role": "user", "content": data["prompt"]}],
                #     temperature=float(data["temperature"]),
                #     top_p=float(data["top_p"]),
                #     n=int(data["n"])
                #     # stream=True
                # )
                # logging.info("Qwen返回值:{}—请求信息:{}".format(response, data))
                # result = response.choices[0].message.content
                # fieldType: 0 = plain-text result, 1 = JSON result
                fieldType = raw_data["input"]["fieldType"]
                if fieldType == 0:
                    res_tmp["content"] = result
                    res_tmp_json = json.dumps(res_tmp, ensure_ascii=False)
                    raw_data["result"] = {"successCode": "1", "errorLog": "", "results": res_tmp_json, "status": 1, "message": "成功"}
                else:
                    # Parse the model reply as JSON and map it onto the requested output fields
                    res = parse_gptResult(res_tmp, result)
                    if res:
                        res_tmp_json = json.dumps(res, ensure_ascii=False)
                        raw_data["result"] = {"successCode": "1", "errorLog": "", "results": res_tmp_json, "status": 1, "message": "成功"}
                    else:
                        res_tmp_json = json.dumps(res_tmp, ensure_ascii=False)
                        raw_data["result"] = {"successCode": "0", "errorLog": "Qwen返回值不是json格式,无法解析!", "results": res_tmp_json, "status": 2, "message": "GPT返回结果非json格式"}
                logging.info(raw_data)
                to_kafka.send_kafka(raw_data, logging)
            except:
                raw_data["result"] = {"successCode": "0", "errorLog": "", "results": "", "status": 2, "message": "异常"}
                raw_data["result"]["errorLog"] = traceback.format_exc()
                res_tmp_json = json.dumps(res_tmp, ensure_ascii=False)
                raw_data["result"]["results"] = res_tmp_json
                logging.info("调用Qwen失败{}-{}".format(raw_data, traceback.format_exc()))
                to_kafka.send_kafka(raw_data, logging)
        else:
            logging.info("暂无任务,进入休眠--")
            time.sleep(10)
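
# For reference (illustrative, not part of this module): the OpenAI-compatible
# /v1/chat/completions endpoint replies with JSON shaped roughly like the snippet
# below, which is why the worker reads d['choices'][0]['message']['content'].
#
# {
#   "id": "chatcmpl-...",
#   "object": "chat.completion",
#   "model": "Qwen2-72B-Instruct-GPTQ-Int4",
#   "choices": [
#     {"index": 0,
#      "message": {"role": "assistant", "content": "<model reply text>"},
#      "finish_reason": "stop"}
#   ],
#   "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
# }
#
# The message pushed to Kafka is raw_data with a "result" envelope added, e.g.
# {"successCode": "1", "errorLog": "", "results": "<res_tmp serialised as JSON>", "status": 1, "message": "成功"}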


def zk_monitoring():
    # Watch the ZooKeeper node /analyze and record pause/version updates in stop_dict
    try:
        # Production environment
        zk = KazooClient(hosts='172.18.1.146:2181,172.18.1.147:2181,172.18.1.148:2181')
        # Test environment
        # zk = KazooClient(hosts='172.16.12.55:2181,172.16.12.56:2181,172.16.12.57:2181')
        zk.start()

        # Register the data watcher
        @zk.DataWatch("/analyze")
        def watch_node(data, stat, event):
            if event is not None and event.type == EventType.CHANGED:
                data, stat = zk.get("/analyze")
                logging.info("执行删除操作:{}".format(data))
                d = json.loads(data)
                id = d["scenes_id"]
                stop_dict[id] = {}
                stop_dict[id]["version"] = d["version"]
                stop_dict[id]["operation"] = d["operation"]

        # Keep the process alive so the watcher keeps receiving node-change events
        try:
            while True:
                time.sleep(1)
        except:
            logging.info("Stopping...")
            # Close the ZooKeeper connection
            zk.stop()
            zk.close()
    except:
        logging.error(traceback.format_exc())
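

# A minimal sketch (an assumption, not part of the original file as shown): the data written
# to /analyze is assumed to be JSON such as {"scenes_id": "...", "version": "...", "operation": "..."}
# (inferred from watch_node above), and the worker loop plus the ZooKeeper watcher are assumed
# to run as background activities next to the Django view. The real project may start them
# elsewhere (e.g. in the app's startup code).
if __name__ == "__main__":
    import threading

    # Drain the task queue and call the Qwen endpoint in the background
    threading.Thread(target=Qwen, daemon=True).start()
    # Watch /analyze for pause/version updates; this call blocks in its keep-alive loop
    zk_monitoring()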