yumoqing 2025-06-14 21:37:56 +08:00
parent ddedfdd95a
commit 1e8a3a7c22
3 changed files with 62 additions and 3 deletions

@@ -97,9 +97,12 @@ class BaseChatLLM:
         messages.append(self._build_sys_message(sys_prompt))
         messages.append(self._build_user_message(prompt, image_path=image_path))
         # debug(f'{messages=}')
+        all_txt = ''
         for d in self._gen(messages):
             if d['choices'][0]['finish_reason'] == 'stop':
-                messages.append(self._build_assistant_message(d['text']))
+                messages.append(self._build_assistant_message(all_txt))
+            else:
+                all_txt += d['choices'][0]['delta']['content']
             yield d
         self._set_session_messages(session, messages)

@@ -143,7 +146,7 @@ class BaseChatLLM:
         async for d in self._async_generator(session, prompt, image_path, video_path, audio_path, sys_prompt):
             s = f'data: {json.dumps(d)}\n'
             yield s
-        yield 'data: [done]'
+        yield 'data: [DONE]'

     def build_kwargs(self, inputs, streamer):
         generate_kwargs = dict(
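The first hunk fixes what gets persisted to the session: in streaming mode the final 'stop' chunk does not carry the full reply in d['text'], so the reply is now accumulated from each chunk's delta['content'] and that accumulated string is stored as the assistant message. A minimal sketch of the pattern (the chunk dicts are illustrative, not the engine's exact output):

def collect_stream(gen):
    # Accumulate delta text until the 'stop' chunk signals completion.
    all_txt = ''
    for d in gen:
        if d['choices'][0]['finish_reason'] == 'stop':
            return all_txt
        all_txt += d['choices'][0]['delta']['content']

chunks = [
    {'choices': [{'finish_reason': None, 'delta': {'content': 'Hel'}}]},
    {'choices': [{'finish_reason': None, 'delta': {'content': 'lo'}}]},
    {'choices': [{'finish_reason': 'stop', 'delta': {}}]},
]
assert collect_stream(iter(chunks)) == 'Hello'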

llmengine/client/llmclient (new executable file, +57 lines)
@@ -0,0 +1,57 @@
#!/home/ymq/py3/bin/python
from traceback import format_exc
import asyncio
import codecs
import json
import argparse
from appPublic.streamhttpclient import liner, StreamHttpClient
from appPublic.log import MyLogger
def user_message(prompt, fn=None):
    # Build the user prompt, appending the contents of an optional file.
    x = ''
    if fn:
        x = user_file(fn)
    return prompt + x

def user_file(fn):
    # Read a UTF-8 text file and return its contents.
    with codecs.open(fn, 'r', 'utf-8') as f:
        return f.read()

async def main():
parser = argparse.ArgumentParser(prog='devops')
parser.add_argument('-f', '--file')
parser.add_argument('-p', '--prompt')
parser.add_argument('-s', '--sys_prompt')
parser.add_argument('-m', '--model')
parser.add_argument('url')
args = parser.parse_args()
    # JSON payload for the streaming chat request.
    d = {
        'model': args.model,
        'stream': True,
        'prompt': user_message(args.prompt, args.file),
        'sys_prompt': args.sys_prompt
    }
    hc = StreamHttpClient()
    headers = {
        'Content-Type': 'application/json'
    }
    reco = hc('POST', args.url, headers=headers, data=json.dumps(d))
    # The response is an SSE-style stream: each line is 'data: <json>'
    # and the stream ends with 'data: [DONE]'.
    async for chunk in liner(reco):
        chunk = chunk[6:]  # strip the 'data: ' prefix
        if chunk == '[DONE]':
            continue
        try:
            f = json.loads(chunk)
        except Exception as e:
            print(f'****{chunk=} error {e} {format_exc()}')
            continue
        if not f['choices'][0]['finish_reason']:
            # Print each incremental delta as it arrives.
            print(f['choices'][0]['delta']['content'], end='', flush=True)
    print('\n\n')
if __name__ == '__main__':
    # Route library logging to /dev/null so only model output reaches stdout.
    MyLogger('null', levelname='error', logfile='/dev/null')
    asyncio.new_event_loop().run_until_complete(main())
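Assuming a server built on these handlers is listening locally, an invocation might look like the following (URL, port, and model name are placeholders, not values from this repo):

./llmclient -m my-model -p 'Summarize this file' -f README.md http://127.0.0.1:8080/v1/chat/completions

The reply prints incrementally as the deltas arrive, mirroring the server's streaming output.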

@@ -21,7 +21,6 @@ def init():
     rf.register('chat_completions', chat_completions)

 async def chat_completions(request, params_kw, *params, **kw):
-    debug(f'{params_kw=}, {params=}, {kw=}')
     async def gor():
         se = ServerEnv()
         engine = se.chat_engine
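Taken together, the changes standardize the stream's wire format: the server emits one 'data: ' line per JSON chunk and closes with the uppercase 'data: [DONE]' sentinel that the new llmclient checks for. Illustratively (field values are made up; the chunk shape follows the code above), the client sees:

data: {"choices": [{"finish_reason": null, "delta": {"content": "Hel"}}]}
data: {"choices": [{"finish_reason": null, "delta": {"content": "lo"}}]}
data: {"choices": [{"finish_reason": "stop", "delta": {}}]}
data: [DONE]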