From 4a685b6a7846a6d6b85e22d3a71424f532cc438e Mon Sep 17 00:00:00 2001
From: yumoqing
Date: Fri, 30 May 2025 03:57:11 +0000
Subject: [PATCH] bugfix

---
 test/chatllm | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/test/chatllm b/test/chatllm
index 00a1526..3a7c531 100755
--- a/test/chatllm
+++ b/test/chatllm
@@ -6,6 +6,7 @@ import argparse
 def get_args():
     parser = argparse.ArgumentParser(description="Example script using argparse")
     parser.add_argument('--gpus', '-g', type=str, required=False, default='0', help='Identify GPU id, default is 0, comma split')
+    parser.add_argument("--stream", action="store_true", help="Whether to stream the output")
     parser.add_argument('modelpath', type=str, help='Path to model folder')
     args = parser.parse_args()
     return args
@@ -16,7 +17,8 @@ def main():
     os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
     gpus = args.gpus.split(',')
     cnt=len(gpus)
-    cmdline = f'/share/vllm-0.8.5/bin/python -m llmengine.chatllm --model {args.modelpath} --gpus {cnt}'
+    stream=' --stream' if args.stream else ' '
+    cmdline = f'/share/vllm-0.8.5/bin/python -m llmengine.chatllm --model {args.modelpath} --gpus {cnt} {stream}'
     print(args, cmdline)
     os.system(cmdline)
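
For context, a minimal sketch of what test/chatllm looks like after this patch is applied. Only the lines shown in the two hunks above come from the diff; the imports, the args = get_args() call inside main(), and the __main__ guard are assumptions added so the sketch is self-contained.

import os
import argparse

def get_args():
    parser = argparse.ArgumentParser(description="Example script using argparse")
    parser.add_argument('--gpus', '-g', type=str, required=False, default='0',
                        help='Identify GPU id, default is 0, comma split')
    # New flag from this patch: forward streaming mode to llmengine.chatllm.
    parser.add_argument("--stream", action="store_true", help="Whether to stream the output")
    parser.add_argument('modelpath', type=str, help='Path to model folder')
    args = parser.parse_args()
    return args

def main():
    args = get_args()  # assumed: not visible in the hunk context
    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
    gpus = args.gpus.split(',')
    cnt = len(gpus)
    # Append ' --stream' only when the caller asked for streaming output.
    stream = ' --stream' if args.stream else ' '
    cmdline = f'/share/vllm-0.8.5/bin/python -m llmengine.chatllm --model {args.modelpath} --gpus {cnt} {stream}'
    print(args, cmdline)
    os.system(cmdline)

if __name__ == '__main__':
    main()

With the patch, an invocation such as ./test/chatllm --gpus 0,1 --stream /path/to/model forwards --stream to the llmengine.chatllm command; without the flag, only a harmless trailing space is appended to the command line.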