async def get_llm_data():
    # Stream chunks from the chat engine as they are generated.
    async for chunk in chat_engine.stream_generate(params_kw.prompt, sys_prompt=params_kw.sys_prompt):
        yield chunk

# Hand the async generator to the streaming response helper.
await stream_response(request, get_llm_data)