Optimize the reranking model
parent 564bddfcde
commit 56cab36bc4

23  llmengine/base_bgererank.py  Normal file
@@ -0,0 +1,23 @@
from abc import ABC, abstractmethod
from typing import List, Dict

model_pathMap = {}


def rerank_register(model_key, Klass):
    """Register a rerank model class for a given model key."""
    global model_pathMap
    model_pathMap[model_key] = Klass


def get_rerank_class(model_path):
    """Find the rerank model class for a given model path."""
    for k, klass in model_pathMap.items():
        if len(model_path.split(k)) > 1:
            return klass
    print(f'{model_pathMap=}')
    return None


class BaseRerank(ABC):
    @abstractmethod
    async def rerank(self, query: str, documents: List[str], top_k: int) -> List[Dict]:
        """Rerank documents based on query relevance."""
        pass
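
A minimal usage sketch of this registry (not part of the commit; DummyRerank is a hypothetical stand-in): registering a class under a key makes any model path that contains the key resolve to that class.

    # Hypothetical example of rerank_register / get_rerank_class.
    from llmengine.base_bgererank import BaseRerank, rerank_register, get_rerank_class

    class DummyRerank(BaseRerank):
        def __init__(self, model_path):
            self.model_path = model_path

        async def rerank(self, query, documents, top_k):
            # Trivial scorer: keep input order, score by position.
            return [{"text": d, "rerank_score": 1.0 / (i + 1)} for i, d in enumerate(documents[:top_k])]

    rerank_register('bge-reranker-v2-m3', DummyRerank)
    Klass = get_rerank_class('/share/models/BAAI/bge-reranker-v2-m3')  # resolves to DummyRerank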

100  llmengine/bgererank.py  Normal file
@@ -0,0 +1,100 @@
from traceback import format_exc
import os
import sys
import argparse
# Imported for its side effect: the module registers M2V3BgeRerank in the rerank registry.
from llmengine.m2v3bgererank import M2V3BgeRerank
from llmengine.base_bgererank import get_rerank_class
from typing import List, Dict
from appPublic.registerfunction import RegisterFunction
from appPublic.worker import awaitify
from appPublic.log import debug, exception
from ahserver.serverenv import ServerEnv
from ahserver.globalEnv import stream_response
from ahserver.webapp import webserver

helptext = """BGE Reranker API:

1. Rerank Endpoint:
path: /v1/bgererank
method: POST
headers: {
    "Content-Type": "application/json"
}
data: {
    "query": "苹果公司在北京开设新店",
    "documents": [
        "苹果公司在上海开设新店",
        "苹果公司在北京发布新产品"
    ],
    "top_k": 5
}
response: {
    "object": "list",
    "data": [
        {
            "text": "苹果公司在北京发布新产品",
            "rerank_score": 0.95
        },
        ...
    ]
}

2. Docs Endpoint:
path: /v1/docs
method: GET
response: This help text
"""


def init():
    rf = RegisterFunction()
    rf.register('bgererank', rerank)
    rf.register('docs', docs)
    debug("registered routes: bgererank, docs")


async def docs(request, params_kw, *params, **kw):
    return helptext


async def rerank(request, params_kw, *params, **kw):
    debug(f'{params_kw=}')
    se = ServerEnv()
    engine = se.engine
    query = params_kw.get('query')
    documents = params_kw.get('documents', [])
    top_k = params_kw.get('top_k', 10)
    if query is None:
        e = exception(f'query is None')
        raise e
    if not isinstance(documents, list) or not all(isinstance(doc, str) for doc in documents):
        e = exception(f'documents must be a list of strings')
        raise e
    try:
        reranked = await engine.rerank(query, documents, top_k)
        debug(f'{reranked=}')
        return {
            "object": "list",
            "data": reranked
        }
    except Exception as e:
        exception(f'rerank failed: {str(e)}')
        raise


def main():
    parser = argparse.ArgumentParser(prog="BGE Reranker Service")
    parser.add_argument('-w', '--workdir')
    parser.add_argument('-p', '--port')
    parser.add_argument('model_path')
    args = parser.parse_args()
    Klass = get_rerank_class(args.model_path)
    if Klass is None:
        e = Exception(f'{args.model_path} has no mapping to a model class')
        exception(f'{e}, {format_exc()}')
        raise e
    se = ServerEnv()
    se.engine = Klass(args.model_path)
    workdir = args.workdir or os.getcwd()
    port = args.port
    debug(f'{args=}')
    webserver(init, workdir, port)


if __name__ == '__main__':
    main()
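
For reference, a small client sketch for the /v1/bgererank endpoint documented above (not part of the commit; it assumes the service from test/bgererank/start.sh is listening on localhost:8887):

    # Hypothetical client for /v1/bgererank, reusing the payload from the helptext.
    import asyncio
    import aiohttp

    async def call_rerank():
        payload = {
            "query": "苹果公司在北京开设新店",
            "documents": ["苹果公司在上海开设新店", "苹果公司在北京发布新产品"],
            "top_k": 5,
        }
        async with aiohttp.ClientSession() as session:
            async with session.post("http://localhost:8887/v1/bgererank", json=payload) as resp:
                data = await resp.json()
                for item in data["data"]:
                    print(item["rerank_score"], item["text"])

    asyncio.run(call_rerank())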

70  llmengine/m2v3bgererank.py  Normal file
@@ -0,0 +1,70 @@
from typing import List, Dict
from appPublic.log import debug, info, error
from appPublic.worker import awaitify
from llmengine.base_bgererank import BaseRerank, rerank_register
from pymilvus.model.reranker import BGERerankFunction
import torch
import asyncio


class M2V3BgeRerank(BaseRerank):
    def __init__(self, model_id: str):
        """Initialize the BGE Reranker with GPU support and FP16."""
        try:
            self.model_id = model_id
            self.model_name = model_id.split('/')[-1]
            # Load the BGE Reranker on cuda:7 (CPU fallback) with FP16
            device = "cuda:7" if torch.cuda.is_available() else "cpu"
            self.reranker = BGERerankFunction(
                model_name=model_id,
                device=device,
                use_fp16=True
            )
            info(f"BGE Reranker initialized, model path: {model_id}, device: {device}, FP16: True")
        except Exception as e:
            error(f"BGE Reranker initialization failed: {str(e)}")
            raise

    async def rerank(self, query: str, documents: List[str], top_k: int = 10) -> List[Dict]:
        """
        Rerank documents with the BGE Reranker. Accepts a plain list of texts and
        returns text and rerank_score for each result.

        Args:
            query (str): query text
            documents (List[str]): list of document texts
            top_k (int): maximum number of results to return, default 10

        Returns:
            List[Dict]: reranked documents, each containing text and rerank_score
        """
        try:
            if not query:
                raise ValueError("query text must not be empty")
            if not documents:
                info("no documents to rerank")
                return []

            debug(f"rerank input: query={query[:50]}..., document count={len(documents)}")

            # Synchronous rerank call
            def sync_rerank(query, texts, top_k):
                return self.reranker(query=query, documents=texts, top_k=min(top_k, len(texts)))

            # Run the synchronous rerank in a thread executor
            loop = asyncio.get_event_loop()
            rerank_results = await loop.run_in_executor(None, lambda: sync_rerank(query, documents, top_k))

            # Build the result list with only text and rerank_score
            reranked = [
                {
                    "text": result.text,
                    "rerank_score": result.score
                }
                for result in rerank_results
            ]
            info(f"rerank returned {len(reranked)} results")
            return reranked

        except Exception as e:
            error(f"rerank failed: {str(e)}")
            raise


rerank_register('bge-reranker-v2-m3', M2V3BgeRerank)
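
A sketch of driving M2V3BgeRerank directly, outside the web service (not part of the commit; it assumes the bge-reranker-v2-m3 weights sit at the path used in test/bgererank/start.sh):

    # Hypothetical direct use of the engine class registered above.
    import asyncio
    from llmengine.m2v3bgererank import M2V3BgeRerank

    async def demo():
        engine = M2V3BgeRerank('/share/models/BAAI/bge-reranker-v2-m3')
        results = await engine.rerank(
            "苹果公司在北京开设新店",
            ["苹果公司在上海开设新店", "苹果公司在北京发布新产品"],
            top_k=2,
        )
        for r in results:
            print(f"{r['rerank_score']:.4f}  {r['text']}")

    asyncio.run(demo())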

@@ -352,6 +352,15 @@ class MilvusConnection:
        debug(
            f'Inserting document: file_path={file_path}, userid={userid}, db_type={db_type}, knowledge_base_id={knowledge_base_id}, document_id={document_id}')
        try:
            # Check whether a document with the same file_path, userid and knowledge_base_id already exists
            collection = Collection(collection_name)
            expr = f'file_path == "{file_path}" && userid == "{userid}" && knowledge_base_id == "{knowledge_base_id}"'
            debug(f"checking for duplicate document: {expr}")
            results = collection.query(expr=expr, output_fields=["document_id"])
            if results:
                raise ValueError(
                    f"Document already exists: file_path={file_path}, userid={userid}, knowledge_base_id={knowledge_base_id}")

            if not os.path.exists(file_path):
                raise ValueError(f"File {file_path} does not exist")

@@ -929,7 +938,7 @@ class MilvusConnection:
            return []

    async def _rerank_results(self, query: str, results: List[Dict], top_n: int) -> List[Dict]:
-       """Call the rerank service"""
+       """Call the BGE rerank service; return every field of the original results and add rerank_score"""
        try:
            if not results:
                debug("no results to rerank")

@@ -942,32 +951,41 @@
            top_n = min(top_n, len(results))
            debug(f"rerank top_n={top_n}, original result count={len(results)}")

            # Build the request documents, text only
            documents = [result["text"] for result in results]

            async with aiohttp.ClientSession() as session:
                async with session.post(
-                   "http://localhost:9997/v1/rerank",
+                   "http://localhost:8887/v1/bgererank",
                    headers={"Content-Type": "application/json"},
                    json={
                        "model": "rerank-001",
                        "query": query,
                        "documents": documents,
-                       "top_n": top_n
+                       "top_k": top_n
                    }
                ) as response:
                    if response.status != 200:
                        error(f"rerank service call failed, status code: {response.status}")
                        raise RuntimeError(f"rerank service call failed: {response.status}")
                    result = await response.json()
-                   if result.get("object") != "rerank.result" or not result.get("data"):
+                   if result.get("object") != "list" or not result.get("data"):
                        error(f"unexpected rerank service response format: {result}")
                        raise RuntimeError("unexpected rerank service response format")
-                   rerank_data = result["data"]

                    # Map results back, keeping every field of the original results
                    reranked_results = []
-                   for item in rerank_data:
-                       index = item["index"]
-                       if index < len(results):
-                           results[index]["rerank_score"] = item["relevance_score"]
-                           reranked_results.append(results[index])
+                   for item in result["data"]:
+                       # Find the matching document in the original results
+                       try:
+                           index = next(i for i, r in enumerate(results) if r["text"] == item["text"])
+                           result_doc = results[index].copy()
+                           result_doc["rerank_score"] = item["rerank_score"]
+                           reranked_results.append(result_doc)
+                           debug(f"rerank result: text={item['text'][:200]}..., rerank_score={item['rerank_score']:.6f}")
+                       except StopIteration:
+                           error(f"no matching original document found: text={item['text'][:50]}...")
+                           continue

                    debug(f"successfully reranked {len(reranked_results)} results")
                    return reranked_results[:top_n]
        except Exception as e:
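
One note on the mapping change above: the new code matches reranked items back to the original results by exact text equality, so duplicate texts all resolve to the first occurrence. A tiny illustration with hypothetical data:

    # Illustrative only: both entries share the same text, so next() always picks index 0.
    results = [{"text": "doc A", "source": "s1"}, {"text": "doc A", "source": "s2"}]
    item = {"text": "doc A", "rerank_score": 0.9}
    index = next(i for i, r in enumerate(results) if r["text"] == item["text"])
    print(index)  # 0 -- the entry from source "s2" is never selected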

82  test/bgererank/=1.3.3  Normal file
@@ -0,0 +1,82 @@
Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
Requirement already satisfied: FlagEmbedding in /share/vllm-0.8.5/lib/python3.10/site-packages (1.3.5)
Requirement already satisfied: torch>=1.6.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (2.6.0)
Requirement already satisfied: transformers>=4.44.2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (4.51.3)
Requirement already satisfied: datasets>=2.19.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (3.6.0)
Requirement already satisfied: accelerate>=0.20.1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (1.7.0)
Requirement already satisfied: sentence_transformers in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (4.1.0)
Requirement already satisfied: peft in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (0.16.0)
Requirement already satisfied: ir-datasets in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (0.5.11)
Requirement already satisfied: sentencepiece in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (0.2.0)
Requirement already satisfied: protobuf in /share/vllm-0.8.5/lib/python3.10/site-packages (from FlagEmbedding) (4.25.7)
Requirement already satisfied: psutil in /share/vllm-0.8.5/lib/python3.10/site-packages (from accelerate>=0.20.1->FlagEmbedding) (7.0.0)
Requirement already satisfied: numpy<3.0.0,>=1.17 in /share/vllm-0.8.5/lib/python3.10/site-packages (from accelerate>=0.20.1->FlagEmbedding) (2.2.5)
Requirement already satisfied: safetensors>=0.4.3 in /share/vllm-0.8.5/lib/python3.10/site-packages (from accelerate>=0.20.1->FlagEmbedding) (0.5.3)
Requirement already satisfied: packaging>=20.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from accelerate>=0.20.1->FlagEmbedding) (24.2)
Requirement already satisfied: pyyaml in /share/vllm-0.8.5/lib/python3.10/site-packages (from accelerate>=0.20.1->FlagEmbedding) (6.0.2)
Requirement already satisfied: huggingface-hub>=0.21.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from accelerate>=0.20.1->FlagEmbedding) (0.30.2)
Requirement already satisfied: pyarrow>=15.0.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (20.0.0)
Requirement already satisfied: fsspec[http]<=2025.3.0,>=2023.1.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (2025.3.0)
Requirement already satisfied: xxhash in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (3.5.0)
Requirement already satisfied: multiprocess<0.70.17 in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (0.70.16)
Requirement already satisfied: tqdm>=4.66.3 in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (4.67.1)
Requirement already satisfied: pandas in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (2.3.0)
Requirement already satisfied: dill<0.3.9,>=0.3.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (0.3.8)
Requirement already satisfied: filelock in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (3.18.0)
Requirement already satisfied: requests>=2.32.2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from datasets>=2.19.0->FlagEmbedding) (2.32.3)
Requirement already satisfied: nvidia-cusparselt-cu12==0.6.2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (0.6.2)
Requirement already satisfied: sympy==1.13.1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (1.13.1)
Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.4.127 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (12.4.127)
Requirement already satisfied: triton==3.2.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (3.2.0)
Requirement already satisfied: jinja2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (3.1.6)
Requirement already satisfied: nvidia-cusolver-cu12==11.6.1.9 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (11.6.1.9)
Requirement already satisfied: typing-extensions>=4.10.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (4.13.2)
Requirement already satisfied: nvidia-nvtx-cu12==12.4.127 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (12.4.127)
Requirement already satisfied: nvidia-nccl-cu12==2.21.5 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (2.21.5)
Requirement already satisfied: nvidia-cuda-cupti-cu12==12.4.127 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (12.4.127)
Requirement already satisfied: nvidia-cuda-runtime-cu12==12.4.127 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (12.4.127)
Requirement already satisfied: nvidia-cufft-cu12==11.2.1.3 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (11.2.1.3)
Requirement already satisfied: nvidia-cublas-cu12==12.4.5.8 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (12.4.5.8)
Requirement already satisfied: nvidia-curand-cu12==10.3.5.147 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (10.3.5.147)
Requirement already satisfied: nvidia-nvjitlink-cu12==12.4.127 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (12.4.127)
Requirement already satisfied: nvidia-cudnn-cu12==9.1.0.70 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (9.1.0.70)
Requirement already satisfied: networkx in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (3.4.2)
Requirement already satisfied: nvidia-cusparse-cu12==12.3.1.170 in /share/vllm-0.8.5/lib/python3.10/site-packages (from torch>=1.6.0->FlagEmbedding) (12.3.1.170)
Requirement already satisfied: mpmath<1.4,>=1.1.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from sympy==1.13.1->torch>=1.6.0->FlagEmbedding) (1.3.0)
Requirement already satisfied: tokenizers<0.22,>=0.21 in /share/vllm-0.8.5/lib/python3.10/site-packages (from transformers>=4.44.2->FlagEmbedding) (0.21.1)
Requirement already satisfied: regex!=2019.12.17 in /share/vllm-0.8.5/lib/python3.10/site-packages (from transformers>=4.44.2->FlagEmbedding) (2024.11.6)
Requirement already satisfied: zlib-state>=0.1.3 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (0.1.9)
Requirement already satisfied: trec-car-tools>=2.5.4 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (2.6)
Requirement already satisfied: lz4>=3.1.10 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (4.4.4)
Requirement already satisfied: ijson>=3.1.3 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (3.4.0)
Requirement already satisfied: warc3-wet>=0.2.3 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (0.2.5)
Requirement already satisfied: inscriptis>=2.2.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (2.6.0)
Requirement already satisfied: beautifulsoup4>=4.4.1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (4.13.4)
Requirement already satisfied: lxml>=4.5.2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (4.9.4)
Requirement already satisfied: warc3-wet-clueweb09>=0.2.5 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (0.2.5)
Requirement already satisfied: unlzw3>=0.2.1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from ir-datasets->FlagEmbedding) (0.2.3)
Requirement already satisfied: scikit-learn in /share/vllm-0.8.5/lib/python3.10/site-packages (from sentence_transformers->FlagEmbedding) (1.7.0)
Requirement already satisfied: Pillow in /share/vllm-0.8.5/lib/python3.10/site-packages (from sentence_transformers->FlagEmbedding) (11.2.1)
Requirement already satisfied: scipy in /share/vllm-0.8.5/lib/python3.10/site-packages (from sentence_transformers->FlagEmbedding) (1.15.2)
Requirement already satisfied: soupsieve>1.2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from beautifulsoup4>=4.4.1->ir-datasets->FlagEmbedding) (2.7)
Requirement already satisfied: aiohttp!=4.0.0a0,!=4.0.0a1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (3.10.10)
Requirement already satisfied: idna<4,>=2.5 in /share/vllm-0.8.5/lib/python3.10/site-packages (from requests>=2.32.2->datasets>=2.19.0->FlagEmbedding) (3.10)
Requirement already satisfied: charset-normalizer<4,>=2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from requests>=2.32.2->datasets>=2.19.0->FlagEmbedding) (3.4.1)
Requirement already satisfied: urllib3<3,>=1.21.1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from requests>=2.32.2->datasets>=2.19.0->FlagEmbedding) (2.4.0)
Requirement already satisfied: certifi>=2017.4.17 in /share/vllm-0.8.5/lib/python3.10/site-packages (from requests>=2.32.2->datasets>=2.19.0->FlagEmbedding) (2025.4.26)
Requirement already satisfied: cbor>=1.0.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from trec-car-tools>=2.5.4->ir-datasets->FlagEmbedding) (1.0.0)
Requirement already satisfied: MarkupSafe>=2.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from jinja2->torch>=1.6.0->FlagEmbedding) (3.0.2)
Requirement already satisfied: python-dateutil>=2.8.2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from pandas->datasets>=2.19.0->FlagEmbedding) (2.9.0.post0)
Requirement already satisfied: tzdata>=2022.7 in /share/vllm-0.8.5/lib/python3.10/site-packages (from pandas->datasets>=2.19.0->FlagEmbedding) (2025.2)
Requirement already satisfied: pytz>=2020.1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from pandas->datasets>=2.19.0->FlagEmbedding) (2025.2)
Requirement already satisfied: threadpoolctl>=3.1.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from scikit-learn->sentence_transformers->FlagEmbedding) (3.6.0)
Requirement already satisfied: joblib>=1.2.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from scikit-learn->sentence_transformers->FlagEmbedding) (1.5.1)
Requirement already satisfied: async-timeout<5.0,>=4.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (4.0.3)
Requirement already satisfied: aiosignal>=1.1.2 in /share/vllm-0.8.5/lib/python3.10/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (1.3.2)
Requirement already satisfied: yarl<2.0,>=1.12.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (1.20.0)
Requirement already satisfied: multidict<7.0,>=4.5 in /share/vllm-0.8.5/lib/python3.10/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (6.4.3)
Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (2.6.1)
Requirement already satisfied: attrs>=17.3.0 in /share/vllm-0.8.5/lib/python3.10/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (25.3.0)
Requirement already satisfied: frozenlist>=1.1.1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (1.6.0)
Requirement already satisfied: six>=1.5 in /share/vllm-0.8.5/lib/python3.10/site-packages (from python-dateutil>=2.8.2->pandas->datasets>=2.19.0->FlagEmbedding) (1.17.0)
Requirement already satisfied: propcache>=0.2.1 in /share/vllm-0.8.5/lib/python3.10/site-packages (from yarl<2.0,>=1.12.0->aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets>=2.19.0->FlagEmbedding) (0.3.1)

47  test/bgererank/conf/config.json  Normal file
@@ -0,0 +1,47 @@
{
	"filesroot": "$[workdir]$/files",
	"logger": {
		"name": "llmengine",
		"levelname": "info",
		"logfile": "$[workdir]$/logs/llmengine.log"
	},
	"website": {
		"paths": [
			["$[workdir]$/wwwroot", ""]
		],
		"client_max_size": 10000,
		"host": "0.0.0.0",
		"port": 8887,
		"coding": "utf-8",
		"indexes": [
			"index.html",
			"index.ui"
		],
		"startswiths": [
			{
				"leading": "/v1/bgererank",
				"registerfunction": "bgererank"
			},
			{
				"leading": "/v1/docs",
				"registerfunction": "docs"
			}
		],
		"processors": [
			[".tmpl", "tmpl"],
			[".app", "app"],
			[".ui", "bui"],
			[".dspy", "dspy"],
			[".md", "md"]
		],
		"rsakey_oops": {
			"privatekey": "$[workdir]$/conf/rsa_private_key.pem",
			"publickey": "$[workdir]$/conf/rsa_public_key.pem"
		},
		"session_max_time": 3000,
		"session_issue_time": 2500,
		"session_redis_notuse": {
			"url": "redis://127.0.0.1:6379"
		}
	}
}

14  test/bgererank/entities.service  Normal file
@@ -0,0 +1,14 @@
[Unit]
Wants=systemd-networkd.service

[Service]
WorkingDirectory=/share/run/entities
ExecStart=/share/run/entities/start.sh
ExecStop=/share/run/entities/stop.sh
StandardOutput=append:/var/log/entities/entities.log
StandardError=append:/var/log/entities/entities.log
SyslogIdentifier=entities

[Install]
WantedBy=multi-user.target

0  test/bgererank/logs/llmengine.log  Normal file

3  test/bgererank/start.sh  Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

/share/vllm-0.8.5/bin/python -m llmengine.bgererank -p 8887 /share/models/BAAI/bge-reranker-v2-m3

10  test/bgererank/stop.sh  Normal file
@@ -0,0 +1,10 @@
#!/bin/bash

# Find and kill the process listening on port 8887
pid=$(lsof -t -i:8887)
if [ -n "$pid" ]; then
	echo "killing process: $pid"
	kill -9 $pid
else
	echo "no process found listening on port 8887"
fi