update: replace requests with aiohttp to implement asynchronous HTTP requests

killua4396 2024-05-18 11:09:44 +08:00
parent b26f3192bc
commit f426878f9e
1 changed file with 80 additions and 77 deletions


@@ -10,7 +10,7 @@ from config import get_config
 import uuid
 import json
 import asyncio
-import requests
+import aiohttp
 # get logger via dependency injection
 logger = get_logger()
@@ -266,11 +266,12 @@ async def sct_llm_handler(ws,session_id,response_type,llm_info,tts_info,db,redis
             'Authorization': f"Bearer {Config.MINIMAX_LLM.API_KEY}",
             'Content-Type': 'application/json'
         }
-        response = requests.post(Config.MINIMAX_LLM.URL, headers=headers, data=payload, stream=True)  # call the LLM
     except Exception as e:
-        logger.error(f"Error during LLM call: {str(e)}")
+        logger.error(f"Error while building the HTTP request: {str(e)}")
     try:
-        for chunk in response.iter_lines():
+        async with aiohttp.ClientSession() as client:
+            async with client.post(Config.MINIMAX_LLM.URL, headers=headers, data=payload) as response:  # send the LLM request
+                async for chunk in response.content.iter_any():
                     chunk_data = parseChunkDelta(chunk)
                     is_end = chunk_data == "end"
                     if not is_end:
@@ -418,8 +419,9 @@ async def scl_llm_handler(ws,session_id,response_type,llm_info,tts_info,db,redis
             'Authorization': f"Bearer {Config.MINIMAX_LLM.API_KEY}",
             'Content-Type': 'application/json'
         }
-        response = requests.post(Config.MINIMAX_LLM.URL, headers=headers, data=payload, stream=True)
-        for chunk in response.iter_lines():
+        async with aiohttp.ClientSession() as client:
+            async with client.post(Config.MINIMAX_LLM.URL, headers=headers, data=payload) as response:  # send the LLM request
+                async for chunk in response.content.iter_any():
                     chunk_data = parseChunkDelta(chunk)
                     is_end = chunk_data == "end"
                     if not is_end:
@@ -578,8 +580,9 @@ async def voice_call_llm_handler(ws,session_id,llm_info,tts_info,db,redis,asr_re
             'Authorization': f"Bearer {Config.MINIMAX_LLM.API_KEY}",
             'Content-Type': 'application/json'
         }
-        response = requests.post(Config.MINIMAX_LLM.URL, headers=headers, data=payload, stream=True)
-        for chunk in response.iter_lines():
+        async with aiohttp.ClientSession() as client:
+            async with client.post(Config.MINIMAX_LLM.URL, headers=headers, data=payload) as response:  # send the LLM request
+                async for chunk in response.content.iter_any():
                     chunk_data = parseChunkDelta(chunk)
                     is_end = chunk_data == "end"
                     if not is_end:
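
All three handlers make the same change: a blocking requests.post(..., stream=True) consumed with response.iter_lines() becomes an aiohttp session that streams the body with response.content.iter_any(), so the event loop is no longer blocked while waiting on the LLM. A minimal, self-contained sketch of the new pattern follows; the URL, key, and payload are placeholders for illustration, not the repo's actual Config.MINIMAX_LLM values:

    import asyncio
    import json

    import aiohttp

    # Placeholders standing in for Config.MINIMAX_LLM.URL / API_KEY.
    LLM_URL = "https://example.com/v1/chat/completions"
    API_KEY = "sk-placeholder"

    async def stream_llm(payload: dict) -> None:
        headers = {
            "Authorization": f"Bearer {API_KEY}",
            "Content-Type": "application/json",
        }
        async with aiohttp.ClientSession() as client:
            async with client.post(LLM_URL, headers=headers, data=json.dumps(payload)) as response:
                # iter_any() yields byte chunks as they arrive from the socket;
                # chunk boundaries are arbitrary and may split a line in two.
                async for chunk in response.content.iter_any():
                    print(chunk.decode("utf-8", errors="replace"), end="", flush=True)

    asyncio.run(stream_llm({"stream": True}))

One behavioral difference worth noting: requests' iter_lines() buffers the stream and splits it on newlines, while iter_any() does not, so parseChunkDelta now receives arbitrary byte chunks rather than whole lines. If the parser expects complete lines, iterating with "async for line in response.content:" (aiohttp's StreamReader yields newline-terminated lines) would be the closer equivalent of the old code.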