This commit is contained in:
ymq1 2025-06-23 04:40:20 +00:00
parent 9c055fe41f
commit fcaee5e657
18 changed files with 609 additions and 4 deletions

View File

@ -3,10 +3,10 @@ import os
import sys import sys
import argparse import argparse
from base_chat_llm import BaseChatLLM, get_llm_class from llmengine.base_chat_llm import BaseChatLLM, get_llm_class
from gemma3_it import Gemma3LLM from llmengine.gemma3_it import Gemma3LLM
from medgemma3_it import MedgemmaLLM from llmengine.medgemma3_it import MedgemmaLLM
from qwen3 import Qwen3LLM from llmengine.qwen3 import Qwen3LLM
from appPublic.registerfunction import RegisterFunction from appPublic.registerfunction import RegisterFunction
from appPublic.log import debug, exception from appPublic.log import debug, exception

View File

@ -0,0 +1,48 @@
[Unit]
Description=An Embedding Service using Qwen3-Embedding-0.6B
# After=network.target DeepSeek70B-kyyds671b-ray.service
# Requires=DeepSeek70B-kyyds671b-ray.service
StartLimitIntervalSec=60
StartLimitBurst=5
[Service]
# 核心启动参数(保持原有配置)
User=ymq
Group=ymq
WorkingDirectory=/share/ymq/run/embeddings
#定义环境变量,所有节点的启动脚本与服务需一致
#Environment="NCCL_SOCKET_IFNAME=enp196s0f0np0"
#ExecStartPre=/data/kyyds671b/ray_check.sh
ExecStart=/share/ymq/run/embeddings/start.sh
ExecStop=/share/ymq/run/embeddings/stop.sh
# 超时与停止控制(新增部分)
# 启动超时延长至 120 秒
# TimeoutStartSec=120
# 停止等待时间 30 秒
# TimeoutStopSec=30
# 优先发送 SIGINT 信号(更适合 Python 程序)
# KillSignal=SIGINT
# 最终强制终止信号
# RestartKillSignal=SIGKILL
# 混合终止模式
# KillMode=mixed
# 重启策略
# Restart=on-failure
# RestartSec=10s
# 服务管理(保持原有配置+增强)
#Restart=always
#RestartSec=10 # 重启间隔从 5 秒调整为 10 秒
#append 是继续写入相当于>> file是重新写入 相当于>
StandardOutput=append:/var/log/embeddings/embeddings.log
StandardError=append:/var/log/embeddings/error.log
SyslogIdentifier=embeddings
# 资源限制(保持可选配置)
#LimitNOFILE=65536
#LimitNPROC=65536
# GPU 支持
#Environment=CUDA_VISIBLE_DEVICES=0,1
[Install]
WantedBy=multi-user.target

3
test/embeddings/start.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Start the embedding service: pin it to GPU 7 and serve Qwen3-Embedding-0.6B
# on port 9998 via the llmengine.embedding module.
CUDA_VISIBLE_DEVICES=7 /share/vllm-0.8.5/bin/python -m llmengine.embedding -p 9998 /d/ymq/models/Qwen/Qwen3-Embedding-0.6B

4
test/embeddings/stop.sh Executable file
View File

@ -0,0 +1,4 @@
#!/usr/bin/bash
# Stop the embedding service by killing processes whose command line matches
# the model path (killname is a custom helper — assumed to be on PATH; verify).
killname Qwen/Qwen3-Embedding

BIN
test/gemma3/.run.sh.swp Normal file

Binary file not shown.

View File

@ -0,0 +1,51 @@
{
"filesroot":"$[workdir]$/files",
"logger":{
"name":"llmengine",
"levelname":"info",
"logfile":"$[workdir]$/logs/llmengine.log"
},
"website":{
"paths":[
["$[workdir]$/wwwroot",""]
],
"client_max_size":10000,
"host":"0.0.0.0",
"port":9995,
"coding":"utf-8",
"ssl_gg":{
"crtfile":"$[workdir]$/conf/www.bsppo.com.pem",
"keyfile":"$[workdir]$/conf/www.bsppo.com.key"
},
"indexes":[
"index.html",
"index.ui"
],
"startswiths":[
{
"leading":"/idfile",
"registerfunction":"idfile"
},{
"leading": "/v1/chat/completions",
"registerfunction": "chat_completions"
}
],
"processors":[
[".tmpl","tmpl"],
[".app","app"],
[".ui","bui"],
[".dspy","dspy"],
[".md","md"]
],
"rsakey_oops":{
"privatekey":"$[workdir]$/conf/rsa_private_key.pem",
"publickey":"$[workdir]$/conf/rsa_public_key.pem"
},
"session_max_time":3000,
"session_issue_time":2500,
"session_redis_notuse":{
"url":"redis://127.0.0.1:6379"
}
}
}

View File

@ -0,0 +1,6 @@
{
"ymq": {
"ref_text": "\u8f7b\u91cf\u5e94\u7528\u670d\u52a1\u5668\u5907\u6848\u6761\u4ef6\uff1a\u8d2d\u4e70\u65f6\u957f\u57283\u4e2a\u6708\u53ca\u4ee5\u4e0a",
"ref_audio": "/data/ymq/py/f5tts/files/87/103/66/49/record.wav"
}
}

View File

@ -0,0 +1,16 @@
[Unit]
Wants=systemd-networkd.service
[Service]
User=ymq
Group=ymq
Type=forking
WorkingDirectory=/share/ymq/run/gemma3
ExecStart=/share/ymq/run/gemma3/start.sh
ExecStop=/share/ymq/run/gemma3/stop.sh
StandardOutput=append:/var/log/gemma3/gemma3.log
StandardError=append:/var/log/gemma3/gemma3.log
SyslogIdentifier=gemma3
[Install]
WantedBy=multi-user.target

4
test/gemma3/install.sh Executable file
View File

@ -0,0 +1,4 @@
sudo mkdir /var/log/gemma3
sudo cp gemma3.service /etc/systemd/system
sudo systemctl enable gemma3
sudo systemctl start gemma3

View File

@ -0,0 +1,342 @@
2025-06-09 08:13:26.400[llmengine][info][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:151]checkAuth() called ... request.path='/v1/chat/completions'
2025-06-09 08:13:26.411[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py:40]params_kw={'{\n"prompt":"who are you"\n}': ''}, args=[]
2025-06-09 08:13:26.418[llmengine][exception][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:168]Exception=client(127.0.0.1) None access /v1/chat/completions cost 0.008466005325317383, (0.000392913818359375), except=name 'stream_response' is not defined
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 42, in chat_completions
return await stream_response(request, gor)
NameError: name 'stream_response' is not defined
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 42, in chat_completions
return await stream_response(request, gor)
NameError: name 'stream_response' is not defined
2025-06-09 08:15:08.876[llmengine][info][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:151]checkAuth() called ... request.path='/v1/chat/completions'
2025-06-09 08:15:08.884[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py:40]params_kw={'{\n"prompt":"who are you"\n}': ''}, args=[]
2025-06-09 08:15:08.891[llmengine][exception][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:168]Exception=client(127.0.0.1) None access /v1/chat/completions cost 0.005657672882080078, (9.679794311523438e-05), except=get_session() missing 1 required positional argument: 'request'
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 43, in chat_completions
return await stream_response(request, gor)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/globalEnv.py", line 58, in stream_response
async for d in async_data_generator():
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 31, in gor
session = await get_session()
TypeError: get_session() missing 1 required positional argument: 'request'
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 43, in chat_completions
return await stream_response(request, gor)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/globalEnv.py", line 58, in stream_response
async for d in async_data_generator():
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 31, in gor
session = await get_session()
TypeError: get_session() missing 1 required positional argument: 'request'
2025-06-09 08:19:30.169[llmengine][info][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:151]checkAuth() called ... request.path='/v1/chat/completions'
2025-06-09 08:19:30.177[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py:40]params_kw={'{\n"prompt":"who are you"\n}': ''}, args=[]
2025-06-09 08:19:30.223[llmengine][exception][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:168]Exception=client(127.0.0.1) None access /v1/chat/completions cost 0.03934144973754883, (0.00010514259338378906), except='None' has no attribute 'startswith'
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 45, in chat_completions
return await stream_response(request, gor)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/globalEnv.py", line 58, in stream_response
async for d in async_data_generator():
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 41, in gor
async for d in engine.async_stream_generate(session, params_kw.prompt, **kwargs):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 112, in async_stream_generate
for d in self._generator(session, prompt,
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 66, in _generator
for d in self._gen(messages):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 137, in _gen
inputs = self._messages2inputs(messages)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py", line 32, in _messages2inputs
text = self.tokenizer.apply_chat_template(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 1695, in apply_chat_template
rendered_chat = compiled_template.render(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 1295, in render
self.environment.handle_exception()
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 942, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 20, in top-level template code
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 399, in call
if not __self.is_safe_callable(__obj):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 265, in is_safe_callable
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
jinja2.exceptions.UndefinedError: 'None' has no attribute 'startswith'
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 45, in chat_completions
return await stream_response(request, gor)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/globalEnv.py", line 58, in stream_response
async for d in async_data_generator():
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 41, in gor
async for d in engine.async_stream_generate(session, params_kw.prompt, **kwargs):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 112, in async_stream_generate
for d in self._generator(session, prompt,
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 66, in _generator
for d in self._gen(messages):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 137, in _gen
inputs = self._messages2inputs(messages)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py", line 32, in _messages2inputs
text = self.tokenizer.apply_chat_template(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 1695, in apply_chat_template
rendered_chat = compiled_template.render(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 1295, in render
self.environment.handle_exception()
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 942, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 20, in top-level template code
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 399, in call
if not __self.is_safe_callable(__obj):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 265, in is_safe_callable
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
jinja2.exceptions.UndefinedError: 'None' has no attribute 'startswith'
2025-06-09 08:28:03.514[llmengine][info][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:151]checkAuth() called ... request.path='/v1/chat/completions'
2025-06-09 08:28:03.522[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py:40]params_kw={'{\n"prompt":"who are you"\n}': ''}, args=[]
2025-06-09 08:28:03.526[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py:33]messages=[{'role': 'user', 'content': None}]
2025-06-09 08:28:03.579[llmengine][exception][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:168]Exception=client(127.0.0.1) None access /v1/chat/completions cost 0.05059671401977539, (0.00011324882507324219), except='None' has no attribute 'startswith'
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 45, in chat_completions
return await stream_response(request, gor)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/globalEnv.py", line 58, in stream_response
async for d in async_data_generator():
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 41, in gor
async for d in engine.async_stream_generate(session, params_kw.prompt, **kwargs):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 112, in async_stream_generate
for d in self._generator(session, prompt,
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 66, in _generator
for d in self._gen(messages):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 137, in _gen
inputs = self._messages2inputs(messages)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py", line 34, in _messages2inputs
text = self.tokenizer.apply_chat_template(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 1695, in apply_chat_template
rendered_chat = compiled_template.render(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 1295, in render
self.environment.handle_exception()
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 942, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 20, in top-level template code
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 399, in call
if not __self.is_safe_callable(__obj):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 265, in is_safe_callable
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
jinja2.exceptions.UndefinedError: 'None' has no attribute 'startswith'
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 45, in chat_completions
return await stream_response(request, gor)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/globalEnv.py", line 58, in stream_response
async for d in async_data_generator():
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 41, in gor
async for d in engine.async_stream_generate(session, params_kw.prompt, **kwargs):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 112, in async_stream_generate
for d in self._generator(session, prompt,
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 66, in _generator
for d in self._gen(messages):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 137, in _gen
inputs = self._messages2inputs(messages)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py", line 34, in _messages2inputs
text = self.tokenizer.apply_chat_template(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 1695, in apply_chat_template
rendered_chat = compiled_template.render(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 1295, in render
self.environment.handle_exception()
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 942, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 20, in top-level template code
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 399, in call
if not __self.is_safe_callable(__obj):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 265, in is_safe_callable
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
jinja2.exceptions.UndefinedError: 'None' has no attribute 'startswith'
2025-06-09 08:31:48.954[llmengine][info][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:151]checkAuth() called ... request.path='/v1/chat/completions'
2025-06-09 08:31:48.961[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py:40]params_kw={'{\n"prompt":"who are you"\n}': ''}, args=[]
2025-06-09 08:31:48.964[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py:29]params_kw={'{\n"prompt":"who are you"\n}': ''}, params=(), kw={}
2025-06-09 08:31:48.968[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py:33]messages=[{'role': 'user', 'content': None}]
2025-06-09 08:31:49.009[llmengine][exception][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:168]Exception=client(127.0.0.1) None access /v1/chat/completions cost 0.04324674606323242, (8.392333984375e-05), except='None' has no attribute 'startswith'
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 46, in chat_completions
return await stream_response(request, gor)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/globalEnv.py", line 58, in stream_response
async for d in async_data_generator():
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 42, in gor
async for d in engine.async_stream_generate(session, params_kw.prompt, **kwargs):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 112, in async_stream_generate
for d in self._generator(session, prompt,
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 66, in _generator
for d in self._gen(messages):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 137, in _gen
inputs = self._messages2inputs(messages)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py", line 34, in _messages2inputs
text = self.tokenizer.apply_chat_template(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 1695, in apply_chat_template
rendered_chat = compiled_template.render(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 1295, in render
self.environment.handle_exception()
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 942, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 20, in top-level template code
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 399, in call
if not __self.is_safe_callable(__obj):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 265, in is_safe_callable
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
jinja2.exceptions.UndefinedError: 'None' has no attribute 'startswith'
Traceback (most recent call last):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py", line 161, in checkAuth
ret = await handler(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/processorResource.py", line 351, in _handle
ret = await processor.handle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 95, in handle
await self.execute(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/baseProcessor.py", line 86, in execute
await self.datahandle(request)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 46, in datahandle
x = await self.path_call(request, self.path)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py", line 42, in path_call
return await f(request, params_kw, *args)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 46, in chat_completions
return await stream_response(request, gor)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/globalEnv.py", line 58, in stream_response
async for d in async_data_generator():
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py", line 42, in gor
async for d in engine.async_stream_generate(session, params_kw.prompt, **kwargs):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 112, in async_stream_generate
for d in self._generator(session, prompt,
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 66, in _generator
for d in self._gen(messages):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/base_chat_llm.py", line 137, in _gen
inputs = self._messages2inputs(messages)
File "/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py", line 34, in _messages2inputs
text = self.tokenizer.apply_chat_template(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 1695, in apply_chat_template
rendered_chat = compiled_template.render(
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 1295, in render
self.environment.handle_exception()
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/environment.py", line 942, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 20, in top-level template code
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 399, in call
if not __self.is_safe_callable(__obj):
File "/share/vllm-0.8.5/lib/python3.10/site-packages/jinja2/sandbox.py", line 265, in is_safe_callable
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
jinja2.exceptions.UndefinedError: 'None' has no attribute 'startswith'
2025-06-09 08:37:22.471[llmengine][info][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:151]checkAuth() called ... request.path='/v1/chat/completions'
2025-06-09 08:37:22.479[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py:40]params_kw={'prompt': 'who are you'}, args=[]
2025-06-09 08:37:22.483[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py:29]params_kw={'prompt': 'who are you'}, params=(), kw={}
2025-06-09 08:37:22.486[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py:33]messages=[{'role': 'user', 'content': 'who are you'}]
2025-06-09 08:48:12.725[llmengine][info][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/auth_api.py:151]checkAuth() called ... request.path='/v1/chat/completions'
2025-06-09 08:48:12.735[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/ahserver/functionProcessor.py:40]params_kw={'prompt': 'who are you'}, args=[]
2025-06-09 08:48:12.738[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/server.py:29]params_kw={'prompt': 'who are you'}, params=(), kw={}
2025-06-09 08:48:12.742[llmengine][debug][/share/vllm-0.8.5/lib/python3.10/site-packages/llmengine/qwen3.py:33]messages=[{'role': 'user', 'content': 'who are you'}]

4
test/gemma3/start.sh Executable file
View File

@ -0,0 +1,4 @@
#!/usr/bin/bash
# Start the gemma-3-4b-it chat server on GPU 5, listening on port 9999,
# backgrounded so the script returns immediately (the unit uses Type=forking).
# NOTE(review): the original launched this exact command twice on the same
# port and GPU — a copy-paste duplicate; the second instance would fail to
# bind port 9999. The duplicate launch is removed.
CUDA_VISIBLE_DEVICES=5 /share/vllm-0.8.5/bin/python -m llmengine.server -p 9999 /share/models/google/gemma-3-4b-it &

3
test/gemma3/stop.sh Normal file
View File

@ -0,0 +1,3 @@
#!/usr/bin/bash
# Stop the gemma3 service by killing processes whose command line matches the
# model name (killname is a custom helper at an absolute path).
/d/ymq/bin/killname gemma-3-4b-it

View File

@ -0,0 +1,50 @@
{
"filesroot":"$[workdir]$/files",
"logger":{
"name":"llmengine",
"levelname":"info",
"logfile":"$[workdir]$/logs/llmengine.log"
},
"website":{
"paths":[
["$[workdir]$/wwwroot",""]
],
"client_max_size":10000,
"host":"0.0.0.0",
"port":9995,
"coding":"utf-8",
"indexes":[
"index.html",
"index.ui"
],
"startswiths":[
{
"leading":"/idfile",
"registerfunction":"idfile"
},{
"leading": "/v1/rerank",
"registerfunction": "rerank"
},{
"leading": "/docs",
"registerfunction": "docs"
}
],
"processors":[
[".tmpl","tmpl"],
[".app","app"],
[".ui","bui"],
[".dspy","dspy"],
[".md","md"]
],
"rsakey_oops":{
"privatekey":"$[workdir]$/conf/rsa_private_key.pem",
"publickey":"$[workdir]$/conf/rsa_public_key.pem"
},
"session_max_time":3000,
"session_issue_time":2500,
"session_redis_notuse":{
"url":"redis://127.0.0.1:6379"
}
}
}

View File

View File

@ -0,0 +1,48 @@
[Unit]
Description=A Rerank Service using Qwen3-Reranker-0.6B
# After=network.target DeepSeek70B-kyyds671b-ray.service
# Requires=DeepSeek70B-kyyds671b-ray.service
StartLimitIntervalSec=60
StartLimitBurst=5
[Service]
# 核心启动参数(保持原有配置)
User=ymq
Group=ymq
WorkingDirectory=/share/ymq/run/reranker
#定义环境变量,所有节点的启动脚本与服务需一致
#Environment="NCCL_SOCKET_IFNAME=enp196s0f0np0"
#ExecStartPre=/data/kyyds671b/ray_check.sh
ExecStart=/share/ymq/run/reranker/start.sh
ExecStop=/share/ymq/run/reranker/stop.sh
# 超时与停止控制(新增部分)
# 启动超时延长至 120 秒
# TimeoutStartSec=120
# 停止等待时间 30 秒
# TimeoutStopSec=30
# 优先发送 SIGINT 信号(更适合 Python 程序)
# KillSignal=SIGINT
# 最终强制终止信号
# RestartKillSignal=SIGKILL
# 混合终止模式
# KillMode=mixed
# 重启策略
# Restart=on-failure
# RestartSec=10s
# 服务管理(保持原有配置+增强)
#Restart=always
#RestartSec=10 # 重启间隔从 5 秒调整为 10 秒
#append 是继续写入相当于>> file是重新写入 相当于>
StandardOutput=append:/var/log/rerank/rerank.log
StandardError=append:/var/log/rerank/error.log
SyslogIdentifier=rerank
# 资源限制(保持可选配置)
#LimitNOFILE=65536
#LimitNPROC=65536
# GPU 支持
#Environment=CUDA_VISIBLE_DEVICES=0,1
[Install]
WantedBy=multi-user.target

4
test/reranker/start.sh Executable file
View File

@ -0,0 +1,4 @@
#!/bin/bash
# Start the rerank service on GPU 7, port 9997.
# The Qwen3-Reranker launch line is kept commented as the previous/alternative model.
# CUDA_VISIBLE_DEVICES=7 /share/vllm-0.8.5/bin/python -m llmengine.rerank -p 9997 /d/ymq/models/Qwen/Qwen3-Reranker-0___6B
CUDA_VISIBLE_DEVICES=7 /share/vllm-0.8.5/bin/python -m llmengine.rerank -p 9997 /share/models/BAAI/bge-reranker-v2-m3

5
test/reranker/stop.sh Executable file
View File

@ -0,0 +1,5 @@
#!/usr/bin/bash
# Stop the rerank service by killing processes matching the active model path.
# The Qwen3-Reranker line is kept commented to match the alternative in start.sh.
#killname Qwen/Qwen3-Reranker
killname BAAI/bge-reranker

17
test/reranker/t.sh Executable file
View File

@ -0,0 +1,17 @@
#!/usr/bin/bash
# Smoke-test the local rerank endpoint: POST a Chinese query plus four
# candidate documents and request the top 5 reranked results.
# `-d @-` makes curl read the JSON request body from stdin (the heredoc below).
curl http://localhost:9997/v1/rerank \
-H "Content-Type: application/json" \
-d @- <<EOF
{
"model": "rerank-001",
"query": "什么是量子计算?",
"documents": [
"量子计算是一种使用量子比特进行计算的方式。",
"古典计算机使用的是二进制位。",
"天气预报依赖于统计模型。",
"量子计算与物理学密切相关。"
],
"top_n": 5
}
EOF