Expose the database layer as a service: llmengine/base_connection.py llmengine/connection.py llmengine/kgc.py test/connection/ llmengine/milvus_connection.py

wangmeihua 2025-06-27 17:52:30 +08:00
parent 3def9dc17e
commit 279c8a5eee
21 changed files with 2044 additions and 0 deletions

llmengine/base_connection.py Normal file
@@ -0,0 +1,29 @@
from abc import ABC, abstractmethod
from typing import Dict
import logging
logger = logging.getLogger(__name__)
connection_pathMap = {}
def connection_register(connection_key, Klass):
"""为给定的连接键注册一个连接类"""
global connection_pathMap
connection_pathMap[connection_key] = Klass
logger.info(f"Registered {connection_key} with class {Klass}")
def get_connection_class(connection_path):
"""根据连接路径查找对应的连接类"""
global connection_pathMap
logger.debug(f"connection_pathMap: {connection_pathMap}")
klass = connection_pathMap.get(connection_path)
if klass is None:
logger.error(f"{connection_path} has not mapping to a connection class")
raise Exception(f"{connection_path} has not mapping to a connection class")
return klass
class BaseConnection(ABC):
@abstractmethod
async def handle_connection(self, action: str, params: Dict = None) -> Dict:
"""处理数据库操作,根据 action 执行创建集合等"""
pass
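A minimal sketch of how this registry is intended to be used; DummyConnection and the 'Dummy' key are hypothetical, for illustration only. A backend module registers its class (normally at import time), and the service later resolves it by key:

    # Hypothetical backend built on the registry above.
    from typing import Dict
    from llmengine.base_connection import BaseConnection, connection_register, get_connection_class

    class DummyConnection(BaseConnection):
        async def handle_connection(self, action: str, params: Dict = None) -> Dict:
            # A real backend would dispatch on `action`; this one just echoes the request.
            return {"status": "success", "action": action, "params": params or {}}

    connection_register('Dummy', DummyConnection)   # usually done at module import time
    engine = get_connection_class('Dummy')()        # how the service resolves and instantiates it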

llmengine/connection.py Normal file
@@ -0,0 +1,316 @@
from llmengine import milvus_connection  # imported for its side effect: registers the 'Milvus' connection class
from traceback import format_exc
import argparse
import logging
from aiohttp import web
from llmengine.base_connection import get_connection_class
from appPublic.registerfunction import RegisterFunction
from appPublic.log import debug, exception
from ahserver.serverenv import ServerEnv
from ahserver.webapp import webserver
import os
import json
logger = logging.getLogger(__name__)
helptext = """Milvus Connection Service API (using pymilvus Collection API):
1. Create Collection Endpoint:
path: /v1/createcollection
method: POST
headers: {"Content-Type": "application/json"}
data: {
"db_type": "textdb"
}
response:
- Success: HTTP 200, {"status": "success", "collection_name": "ragdb_textdb", "message": "集合 ragdb_textdb 创建成功"}
- Error: HTTP 400, {"status": "error", "collection_name": "ragdb_textdb", "message": "<error message>"}
2. Delete Collection Endpoint:
path: /v1/deletecollection
method: POST
headers: {"Content-Type": "application/json"}
data: {
"db_type": "textdb"
}
response:
- Success: HTTP 200, {"status": "success", "collection_name": "ragdb_textdb", "message": "集合 ragdb_textdb 删除成功"}
- Error: HTTP 400, {"status": "error", "collection_name": "ragdb_textdb", "message": "<error message>"}
3. Insert File Endpoint:
path: /v1/insertfile
method: POST
headers: {"Content-Type": "application/json"}
data: {
"file_path": "/path/to/file.txt",
"userid": "user1",
"db_type": "textdb"
}
response:
- Success: HTTP 200, {"status": "success", "document_id": "<uuid>", "collection_name": "ragdb_textdb", "message": "文件 /path/to/file.txt 成功嵌入并处理三元组"}
- Error: HTTP 400, {"status": "error", "document_id": "<uuid>", "collection_name": "ragdb_textdb", "message": "<error message>"}
4. Delete File Endpoint:
path: /v1/deletefile
method: POST
headers: {"Content-Type": "application/json"}
data: {
"db_type": "textdb",
"userid": "user1",
"filename": "test.txt"
}
response:
- Success: HTTP 200, {"status": "success", "collection_name": "ragdb_textdb", "message": "成功删除 X 条记录userid=user1, filename=test.txt"}
- Error: HTTP 400, {"status": "error", "collection_name": "ragdb_textdb", "message": "<error message>"}
5. Fused Search Query Endpoint:
path: /v1/fusedsearchquery
method: POST
headers: {"Content-Type": "application/json"}
data: {
"query": "苹果公司在北京开设新店",
"userid": "user1",
"db_type": "textdb",
"file_paths": ["/path/to/file.txt"],
"limit": 5,
"offset": 0,
"use_rerank": true
}
response:
- Success: HTTP 200, [
{
"text": "<完整文本内容>",
"distance": 0.95,
"source": "fused_triplets_X|fused_query",
"rerank_score": 0.92, // use_rerank=true
"metadata": {
"userid": "user1",
"document_id": "<uuid>",
"filename": "test.txt",
"file_path": "/path/to/file.txt",
"upload_time": "2025-06-27T15:58:00",
"file_type": "txt"
}
},
...
]
- Error: HTTP 400, {"status": "error", "message": "<error message>"}
6. Connection Endpoint (for compatibility):
path: /v1/connection
method: POST
headers: {"Content-Type": "application/json"}
data: {
"action": "<initialize|get_params|create_collection|delete_collection|insert_document|delete_document|fused_search>",
"params": {...}
}
response:
- Success: HTTP 200, {"status": "success", ...}
- Error: HTTP 400, {"status": "error", "message": "<error message>"}
7. Docs Endpoint:
path: /v1/docs
method: GET
response: This help text
"""
def init():
rf = RegisterFunction()
rf.register('createcollection', create_collection)
rf.register('deletecollection', delete_collection)
rf.register('insertfile', insert_file)
rf.register('deletefile', delete_file)
rf.register('fusedsearchquery', fused_search_query)
rf.register('connection', handle_connection)
rf.register('docs', docs)
async def docs(request, params_kw, *params, **kw):
return web.Response(text=helptext, content_type='text/plain')
async def not_implemented(request, params_kw, *params, **kw):
return web.json_response({
"status": "error",
"message": "功能尚未实现"
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=501)
async def create_collection(request, params_kw, *params, **kw):
debug(f'{params_kw=}')
se = ServerEnv()
engine = se.engine
db_type = params_kw.get('db_type')
if db_type is None:
debug(f'db_type 未提供')
return web.json_response({
"status": "error",
"message": "db_type 参数未提供"
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
try:
result = await engine.handle_connection("create_collection", {"db_type": db_type})
debug(f'{result=}')
return web.json_response(result, dumps=lambda obj: json.dumps(obj, ensure_ascii=False))
except Exception as e:
debug(f'创建集合失败: {str(e)}')
return web.json_response({
"status": "error",
"collection_name": f"ragdb_{db_type}",
"message": str(e)
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
async def delete_collection(request, params_kw, *params, **kw):
debug(f'{params_kw=}')
se = ServerEnv()
engine = se.engine
db_type = params_kw.get('db_type')
if db_type is None:
debug(f'db_type 未提供')
return web.json_response({
"status": "error",
"message": "db_type 参数未提供"
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
try:
result = await engine.handle_connection("delete_collection", {"db_type": db_type})
debug(f'{result=}')
return web.json_response(result, dumps=lambda obj: json.dumps(obj, ensure_ascii=False))
except Exception as e:
debug(f'删除集合失败: {str(e)}')
return web.json_response({
"status": "error",
"collection_name": f"ragdb_{db_type}",
"message": str(e)
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
async def insert_file(request, params_kw, *params, **kw):
debug(f'{params_kw=}')
se = ServerEnv()
engine = se.engine
file_path = params_kw.get('file_path')
userid = params_kw.get('userid')
db_type = params_kw.get('db_type')
if not all([file_path, userid, db_type]):
debug(f'file_path, userid 或 db_type 未提供')
return web.json_response({
"status": "error",
"message": "file_path, userid 或 db_type 未提供"
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
try:
result = await engine.handle_connection("insert_document", {
"file_path": file_path,
"userid": userid,
"db_type": db_type
})
debug(f'{result=}')
return web.json_response(result, dumps=lambda obj: json.dumps(obj, ensure_ascii=False))
except Exception as e:
debug(f'插入文件失败: {str(e)}')
return web.json_response({
"status": "error",
"document_id": "",
"collection_name": f"ragdb_{db_type}",
"message": str(e)
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
async def delete_file(request, params_kw, *params, **kw):
debug(f'{params_kw=}')
se = ServerEnv()
engine = se.engine
db_type = params_kw.get('db_type')
userid = params_kw.get('userid')
filename = params_kw.get('filename')
if not all([db_type, userid, filename]):
debug(f'db_type, userid 或 filename 未提供')
return web.json_response({
"status": "error",
"message": "db_type, userid 或 filename 未提供"
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
try:
result = await engine.handle_connection("delete_document", {
"db_type": db_type,
"userid": userid,
"filename": filename
})
debug(f'{result=}')
return web.json_response(result, dumps=lambda obj: json.dumps(obj, ensure_ascii=False))
except Exception as e:
debug(f'删除文件失败: {str(e)}')
return web.json_response({
"status": "error",
"collection_name": f"ragdb_{db_type}",
"message": str(e)
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
async def fused_search_query(request, params_kw, *params, **kw):
debug(f'{params_kw=}')
se = ServerEnv()
engine = se.engine
query = params_kw.get('query')
userid = params_kw.get('userid')
db_type = params_kw.get('db_type')
file_paths = params_kw.get('file_paths')
limit = params_kw.get('limit', 5)
offset = params_kw.get('offset', 0)
use_rerank = params_kw.get('use_rerank', True)
if not all([query, userid, db_type, file_paths]):
debug(f'query, userid, db_type 或 file_paths 未提供')
return web.json_response({
"status": "error",
"message": "query, userid, db_type 或 file_paths 未提供"
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
try:
result = await engine.handle_connection("fused_search", {
"query": query,
"userid": userid,
"db_type": db_type,
"file_paths": file_paths,
"limit": limit,
"offset": offset,
"use_rerank": use_rerank
})
debug(f'{result=}')
return web.json_response(result, dumps=lambda obj: json.dumps(obj, ensure_ascii=False))
except Exception as e:
debug(f'融合搜索失败: {str(e)}')
return web.json_response({
"status": "error",
"message": str(e)
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
async def handle_connection(request, params_kw, *params, **kw):
debug(f'{params_kw=}')
se = ServerEnv()
engine = se.engine
try:
data = await request.json()
action = data.get('action')
if not action:
debug(f'action 未提供')
return web.json_response({
"status": "error",
"message": "action 参数未提供"
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
result = await engine.handle_connection(action, data.get('params', {}))
debug(f'{result=}')
return web.json_response(result, dumps=lambda obj: json.dumps(obj, ensure_ascii=False))
except Exception as e:
debug(f'处理连接操作失败: {str(e)}')
return web.json_response({
"status": "error",
"message": str(e)
}, dumps=lambda obj: json.dumps(obj, ensure_ascii=False), status=400)
def main():
parser = argparse.ArgumentParser(prog="Milvus Connection Service")
parser.add_argument('-w', '--workdir')
parser.add_argument('-p', '--port', default='8888')
parser.add_argument('connection_path')
args = parser.parse_args()
logger.debug(f"Arguments: {args}")
Klass = get_connection_class(args.connection_path)
se = ServerEnv()
se.engine = Klass()
workdir = args.workdir or os.getcwd()
port = args.port
debug(f'{args=}')
webserver(init, workdir, port)
if __name__ == '__main__':
main()
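For reference, a hedged client-side sketch of two of the endpoints documented in the helptext above; the host/port (localhost:8888) and file paths are assumptions, and the service must already be running:

    import asyncio
    import aiohttp

    async def demo():
        async with aiohttp.ClientSession() as session:
            # Embed a file (see "Insert File Endpoint" above); the path is illustrative.
            async with session.post("http://localhost:8888/v1/insertfile",
                                    json={"file_path": "/path/to/file.txt",
                                          "userid": "user1", "db_type": "textdb"}) as resp:
                print(await resp.json())
            # Fused search over that file (see "Fused Search Query Endpoint" above).
            async with session.post("http://localhost:8888/v1/fusedsearchquery",
                                    json={"query": "苹果公司在北京开设新店", "userid": "user1",
                                          "db_type": "textdb", "file_paths": ["/path/to/file.txt"],
                                          "limit": 5, "offset": 0, "use_rerank": True}) as resp:
                print(await resp.json())

    asyncio.run(demo())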

llmengine/kgc.py Normal file
@@ -0,0 +1,174 @@
import logging
import os
import re
from py2neo import Graph, Node, Relationship
from typing import Set, List, Dict, Tuple
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class KnowledgeGraph:
def __init__(self, triples: List[Dict], document_id: str):
self.triples = triples
self.document_id = document_id
self.g = Graph("bolt://10.18.34.18:7687", auth=('neo4j', '261229..wmh'))
logger.info(f"开始构建知识图谱document_id: {self.document_id}, 三元组数量: {len(triples)}")
def _normalize_label(self, entity_type: str) -> str:
"""规范化实体类型为 Neo4j 标签"""
if not entity_type or not entity_type.strip():
return 'Entity'
entity_type = re.sub(r'[^\w\s]', '', entity_type.strip())
words = entity_type.split()
label = '_'.join(word.capitalize() for word in words if word)
return label or 'Entity'
def _clean_relation(self, relation: str) -> Tuple[str, str]:
"""清洗关系,返回 (rel_type, rel_name)"""
relation = relation.strip()
if not relation:
return 'RELATED_TO', '相关'
if relation.startswith('<') and relation.endswith('>'):
cleaned_relation = relation[1:-1]
rel_name = cleaned_relation
rel_type = re.sub(r'[^\w\s]', '', cleaned_relation).replace(' ', '_').upper()
else:
rel_name = relation
rel_type = re.sub(r'[^\w\s]', '', relation).replace(' ', '_').upper()
if 'instance of' in relation.lower():
rel_type = 'INSTANCE_OF'
rel_name = '实例'
elif 'subclass of' in relation.lower():
rel_type = 'SUBCLASS_OF'
rel_name = '子类'
elif 'part of' in relation.lower():
rel_type = 'PART_OF'
rel_name = '部分'
logger.debug(f"处理关系: {relation} -> {rel_type} ({rel_name})")
return rel_type, rel_name
def read_nodes(self) -> Tuple[Dict[str, Set], Dict[str, List], List[Dict]]:
"""从三元组列表中读取节点和关系"""
nodes_by_label = {}
relations_by_type = {}
triples = []
try:
for triple in self.triples:
if not all(key in triple for key in ['head', 'head_type', 'type', 'tail', 'tail_type']):
logger.warning(f"无效三元组: {triple}")
continue
head, relation, tail, head_type, tail_type = (
triple['head'], triple['type'], triple['tail'], triple['head_type'], triple['tail_type']
)
head_label = self._normalize_label(head_type)
tail_label = self._normalize_label(tail_type)
logger.debug(f"实体类型: {head_type} -> {head_label}, {tail_type} -> {tail_label}")
if head_label not in nodes_by_label:
nodes_by_label[head_label] = set()
if tail_label not in nodes_by_label:
nodes_by_label[tail_label] = set()
nodes_by_label[head_label].add(head)
nodes_by_label[tail_label].add(tail)
rel_type, rel_name = self._clean_relation(relation)
if rel_type not in relations_by_type:
relations_by_type[rel_type] = []
relations_by_type[rel_type].append({
'head': head,
'tail': tail,
'head_label': head_label,
'tail_label': tail_label,
'rel_name': rel_name
})
triples.append({
'head': head,
'relation': relation,
'tail': tail,
'head_type': head_type,
'tail_type': tail_type
})
logger.info(f"读取节点: {sum(len(nodes) for nodes in nodes_by_label.values())}")
logger.info(f"读取关系: {sum(len(rels) for rels in relations_by_type.values())}")
return nodes_by_label, relations_by_type, triples
except Exception as e:
logger.error(f"读取三元组失败: {str(e)}")
raise RuntimeError(f"读取三元组失败: {str(e)}")
def create_node(self, label: str, nodes: Set[str]):
"""创建节点,包含 document_id 属性"""
count = 0
for node_name in nodes:
query = f"MATCH (n:{label} {{name: '{node_name}', document_id: '{self.document_id}'}}) RETURN n"
try:
if self.g.run(query).data():
continue
node = Node(label, name=node_name, document_id=self.document_id)
self.g.create(node)
count += 1
logger.debug(f"创建节点: {label} - {node_name} (document_id: {self.document_id})")
except Exception as e:
logger.error(f"创建节点失败: {label} - {node_name}, 错误: {str(e)}")
logger.info(f"创建 {label} 节点: {count}/{len(nodes)}")
return count
def create_relationship(self, rel_type: str, relations: List[Dict]):
"""创建关系"""
count = 0
total = len(relations)
seen_edges = set()
for rel in relations:
head, tail, head_label, tail_label, rel_name = (
rel['head'], rel['tail'], rel['head_label'], rel['tail_label'], rel['rel_name']
)
edge_key = f"{head_label}:{head}###{tail_label}:{tail}###{rel_type}"
if edge_key in seen_edges:
continue
seen_edges.add(edge_key)
query = (
f"MATCH (p:{head_label} {{name: '{head}', document_id: '{self.document_id}'}}), "
f"(q:{tail_label} {{name: '{tail}', document_id: '{self.document_id}'}}) "
f"CREATE (p)-[r:{rel_type} {{name: '{rel_name}'}}]->(q)"
)
try:
self.g.run(query)
count += 1
logger.debug(f"创建关系: {head} -[{rel_type}]-> {tail} (document_id: {self.document_id})")
except Exception as e:
logger.error(f"创建关系失败: {query}, 错误: {str(e)}")
logger.info(f"创建 {rel_type} 关系: {count}/{total}")
return count
def create_graphnodes(self):
"""创建所有节点"""
nodes_by_label, _, _ = self.read_nodes()
total = 0
for label, nodes in nodes_by_label.items():
total += self.create_node(label, nodes)
logger.info(f"总计创建节点: {total}")
return total
def create_graphrels(self):
"""创建所有关系"""
_, relations_by_type, _ = self.read_nodes()
total = 0
for rel_type, relations in relations_by_type.items():
total += self.create_relationship(rel_type, relations)
logger.info(f"总计创建关系: {total}")
return total
def export_data(self):
"""导出节点到文件,包含 document_id"""
nodes_by_label, _, _ = self.read_nodes()
os.makedirs('dict', exist_ok=True)
for label, nodes in nodes_by_label.items():
with open(f'dict/{label.lower()}.txt', 'w', encoding='utf-8') as f:
f.write('\n'.join(f"{name}\t{self.document_id}" for name in sorted(nodes)))
logger.info(f"导出 {label} 节点到 dict/{label.lower()}.txt: {len(nodes)}")
return
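A small usage sketch for the class above; the triples and document_id are fabricated, and the Neo4j instance hard-coded in __init__ must be reachable for it to run:

    from llmengine.kgc import KnowledgeGraph

    triples = [
        {"head": "苹果公司", "head_type": "company", "type": "<opened>", "tail": "新店", "tail_type": "store"},
        {"head": "新店", "head_type": "store", "type": "part of", "tail": "北京", "tail_type": "city"},
    ]
    kg = KnowledgeGraph(triples=triples, document_id="doc-0001")
    kg.create_graphnodes()   # creates a node per entity unless one with the same name/document_id already exists
    kg.create_graphrels()    # one relationship per unique (head, tail, rel_type); "part of" maps to PART_OF
    kg.export_data()         # writes dict/<label>.txt files like the ones included later in this commit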

llmengine/milvus_connection.py Normal file
@@ -0,0 +1,917 @@
import os
import logging
import yaml
from pymilvus import connections, utility, Collection, CollectionSchema, FieldSchema, DataType
from threading import Lock
from llmengine.base_connection import connection_register
from typing import Dict, List
import aiohttp
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
import uuid
from datetime import datetime
from filetxt.loader import fileloader
from llmengine.kgc import KnowledgeGraph
import numpy as np
from py2neo import Graph
from scipy.spatial.distance import cosine
logger = logging.getLogger(__name__)
CONFIG_PATH = os.getenv('CONFIG_PATH', '/share/wangmeihua/rag/conf/milvusconfig.yaml')
try:
with open(CONFIG_PATH, 'r', encoding='utf-8') as f:
config = yaml.safe_load(f)
MILVUS_DB_PATH = config['database']['milvus_db_path']
NEO4J_URI = "bolt://10.18.34.18:7687"
NEO4J_USER = "neo4j"
NEO4J_PASSWORD = "261229..wmh"
except Exception as e:
logger.error(f"加载配置文件 {CONFIG_PATH} 失败: {str(e)}")
raise RuntimeError(f"无法加载配置文件: {str(e)}")
# Embedding cache: text -> L2-normalized vector
EMBED_CACHE = {}
class MilvusConnection:
_instance = None
_lock = Lock()
def __new__(cls):
with cls._lock:
if cls._instance is None:
cls._instance = super(MilvusConnection, cls).__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self):
if self._initialized:
return
self.db_path = MILVUS_DB_PATH
self.neo4j_uri = NEO4J_URI
self.neo4j_user = NEO4J_USER
self.neo4j_password = NEO4J_PASSWORD
self._initialize_connection()
self._initialized = True
logger.info(f"MilvusConnection initialized with db_path: {self.db_path}")
def _initialize_connection(self):
"""初始化 Milvus 连接,确保单一连接"""
try:
db_dir = os.path.dirname(self.db_path)
if not os.path.exists(db_dir):
os.makedirs(db_dir, exist_ok=True)
logger.debug(f"创建 Milvus 目录: {db_dir}")
if not os.access(db_dir, os.W_OK):
raise RuntimeError(f"Milvus 目录 {db_dir} 不可写")
if not connections.has_connection("default"):
connections.connect("default", uri=self.db_path)
logger.debug(f"已连接到 Milvus Lite路径: {self.db_path}")
else:
logger.debug("已存在 Milvus 连接,跳过重复连接")
except Exception as e:
logger.error(f"连接 Milvus 失败: {str(e)}")
raise RuntimeError(f"连接 Milvus 失败: {str(e)}")
async def handle_connection(self, action: str, params: Dict = None) -> Dict:
"""处理数据库操作"""
try:
if action == "initialize":
if not connections.has_connection("default"):
self._initialize_connection()
return {"status": "success", "message": f"Milvus 连接已初始化,路径: {self.db_path}"}
elif action == "get_params":
return {"status": "success", "params": {"uri": self.db_path}}
elif action == "create_collection":
if not params or "db_type" not in params:
return {"status": "error", "message": "缺少 db_type 参数"}
return self._create_collection(params["db_type"])
elif action == "delete_collection":
if not params or "db_type" not in params:
return {"status": "error", "message": "缺少 db_type 参数"}
return self._delete_collection(params["db_type"])
elif action == "insert_document":
if not params or "file_path" not in params or "userid" not in params or "db_type" not in params:
return {"status": "error", "message": "缺少 file_path, userid 或 db_type 参数"}
return await self._insert_document(
params["file_path"],
params["userid"],
params["db_type"]
)
elif action == "delete_document":
if not params or "db_type" not in params or "userid" not in params or "filename" not in params:
return {"status": "error", "message": "缺少 db_type, userid 或 filename 参数"}
return self._delete_document(
params["db_type"],
params["userid"],
params["filename"]
)
elif action == "fused_search":
if not params or "query" not in params or "userid" not in params or "db_type" not in params or "file_paths" not in params:
return {"status": "error", "message": "缺少 query, userid, db_type 或 file_paths 参数"}
return await self._fused_search(
params["query"],
params["userid"],
params["db_type"],
params["file_paths"],
params.get("limit", 5),
params.get("offset", 0),
params.get("use_rerank", True)
)
else:
return {"status": "error", "message": f"未知的 action: {action}"}
except Exception as e:
logger.error(f"处理操作失败: {str(e)}")
return {"status": "error", "message": str(e)}
def _create_collection(self, db_type: str) -> Dict:
"""创建 Milvus 集合"""
try:
if not db_type:
raise ValueError("db_type 不能为空")
if "_" in db_type:
raise ValueError("db_type 不能包含下划线")
if len(db_type) > 100:
raise ValueError("db_type 的长度应小于 100")
collection_name = f"ragdb_{db_type}"
if len(collection_name) > 255:
raise ValueError(f"集合名称 {collection_name} 超过 255 个字符")
logger.debug(f"集合名称: {collection_name}")
fields = [
FieldSchema(name="pk", dtype=DataType.VARCHAR, is_primary=True, max_length=36, auto_id=True),
FieldSchema(name="userid", dtype=DataType.VARCHAR, max_length=100),
FieldSchema(name="document_id", dtype=DataType.VARCHAR, max_length=36),
FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=1024),
FieldSchema(name="filename", dtype=DataType.VARCHAR, max_length=255),
FieldSchema(name="file_path", dtype=DataType.VARCHAR, max_length=1024),
FieldSchema(name="upload_time", dtype=DataType.VARCHAR, max_length=64),
FieldSchema(name="file_type", dtype=DataType.VARCHAR, max_length=64),
]
schema = CollectionSchema(
fields=fields,
description=f"{db_type} 数据集合,跨用户使用,包含 document_id 和元数据字段",
auto_id=True,
primary_field="pk",
)
if utility.has_collection(collection_name):
try:
collection = Collection(collection_name)
existing_schema = collection.schema
expected_fields = {f.name for f in fields}
actual_fields = {f.name for f in existing_schema.fields}
vector_field = next((f for f in existing_schema.fields if f.name == "vector"), None)
schema_compatible = False
if expected_fields == actual_fields and vector_field is not None and vector_field.dtype == DataType.FLOAT_VECTOR:
dim = vector_field.params.get('dim', None) if hasattr(vector_field, 'params') and vector_field.params else None
schema_compatible = dim == 1024
logger.debug(f"检查集合 {collection_name} 的 schema: 字段匹配={expected_fields == actual_fields}, "
f"vector_field存在={vector_field is not None}, dtype={vector_field.dtype if vector_field else ''}, "
f"dim={dim if dim is not None else '未定义'}")
if not schema_compatible:
logger.warning(f"集合 {collection_name} 的 schema 不兼容,原因: "
f"字段不匹配: {expected_fields.symmetric_difference(actual_fields) or ''}, "
f"vector_field: {vector_field is not None}, "
f"dtype: {vector_field.dtype if vector_field else ''}, "
f"dim: {vector_field.params.get('dim', '未定义') if vector_field and hasattr(vector_field, 'params') and vector_field.params else '未定义'}")
utility.drop_collection(collection_name)
else:
collection.load()
logger.debug(f"集合 {collection_name} 已存在并加载成功")
return {
"status": "success",
"collection_name": collection_name,
"message": f"集合 {collection_name} 已存在"
}
except Exception as e:
logger.error(f"加载集合 {collection_name} 失败: {str(e)}")
return {
"status": "error",
"collection_name": collection_name,
"message": str(e)
}
try:
collection = Collection(collection_name, schema)
collection.create_index(
field_name="vector",
index_params={"index_type": "AUTOINDEX", "metric_type": "COSINE"}
)
for field in ["userid", "document_id", "filename", "file_path", "upload_time", "file_type"]:
collection.create_index(
field_name=field,
index_params={"index_type": "INVERTED"}
)
collection.load()
logger.debug(f"成功创建并加载集合: {collection_name}")
return {
"status": "success",
"collection_name": collection_name,
"message": f"集合 {collection_name} 创建成功"
}
except Exception as e:
logger.error(f"创建集合 {collection_name} 失败: {str(e)}")
return {
"status": "error",
"collection_name": collection_name,
"message": str(e)
}
except Exception as e:
logger.error(f"创建集合失败: {str(e)}")
return {
"status": "error",
"collection_name": collection_name,
"message": str(e)
}
def _delete_collection(self, db_type: str) -> Dict:
"""删除 Milvus 集合"""
try:
if not db_type:
raise ValueError("db_type 不能为空")
if "_" in db_type:
raise ValueError("db_type 不能包含下划线")
if len(db_type) > 100:
raise ValueError("db_type 的长度应小于 100")
collection_name = f"ragdb_{db_type}"
if len(collection_name) > 255:
raise ValueError(f"集合名称 {collection_name} 超过 255 个字符")
logger.debug(f"集合名称: {collection_name}")
if not utility.has_collection(collection_name):
logger.debug(f"集合 {collection_name} 不存在")
return {
"status": "success",
"collection_name": collection_name,
"message": f"集合 {collection_name} 不存在,无需删除"
}
try:
utility.drop_collection(collection_name)
logger.debug(f"成功删除集合: {collection_name}")
return {
"status": "success",
"collection_name": collection_name,
"message": f"集合 {collection_name} 删除成功"
}
except Exception as e:
logger.error(f"删除集合 {collection_name} 失败: {str(e)}")
return {
"status": "error",
"collection_name": collection_name,
"message": str(e)
}
except Exception as e:
logger.error(f"删除集合失败: {str(e)}")
return {
"status": "error",
"collection_name": collection_name,
"message": str(e)
}
async def _insert_document(self, file_path: str, userid: str, db_type: str) -> Dict:
"""将文档插入 Milvus 并抽取三元组到 Neo4j"""
document_id = str(uuid.uuid4())
collection_name = f"ragdb_{db_type}"
try:
if not userid or not db_type:
raise ValueError("userid 和 db_type 不能为空")
if "_" in userid or "_" in db_type:
raise ValueError("userid 和 db_type 不能包含下划线")
if not os.path.exists(file_path):
raise ValueError(f"文件 {file_path} 不存在")
if len(db_type) > 100:
raise ValueError("db_type 的长度应小于 100")
supported_formats = {'pdf', 'doc', 'docx', 'xlsx', 'xls', 'ppt', 'pptx', 'csv', 'txt'}
ext = file_path.rsplit('.', 1)[1].lower() if '.' in file_path else ''
if ext not in supported_formats:
raise ValueError(f"不支持的文件格式: {ext}, 支持的格式: {', '.join(supported_formats)}")
logger.info(f"生成 document_id: {document_id} for file: {file_path}")
logger.debug(f"加载文件: {file_path}")
text = fileloader(file_path)
if not text or not text.strip():
raise ValueError(f"文件 {file_path} 加载为空")
document = Document(page_content=text)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=2000,
chunk_overlap=200,
length_function=len,
)
logger.debug("开始分片文件内容")
chunks = text_splitter.split_documents([document])
if not chunks:
raise ValueError(f"文件 {file_path} 未生成任何文档块")
logger.debug(f"文件 {file_path} 分割为 {len(chunks)} 个文档块")
filename = os.path.basename(file_path).rsplit('.', 1)[0]
upload_time = datetime.now().isoformat()
documents = []
for i, chunk in enumerate(chunks):
chunk.metadata.update({
'userid': userid,
'document_id': document_id,
'filename': filename + '.' + ext,
'file_path': file_path,
'upload_time': upload_time,
'file_type': ext,
})
documents.append(chunk)
logger.debug(f"文档块 {i} 元数据: {chunk.metadata}")
logger.debug(f"确保集合 {collection_name} 存在")
create_result = self._create_collection(db_type)
if create_result["status"] == "error":
raise RuntimeError(f"集合创建失败: {create_result['message']}")
logger.debug("调用嵌入服务生成向量")
texts = [doc.page_content for doc in documents]
embeddings = await self._get_embeddings(texts)
await self._insert_to_milvus(collection_name, documents, embeddings)
logger.info(f"成功插入 {len(documents)} 个文档块到 {collection_name}")
logger.debug("调用三元组抽取服务")
try:
triples = await self._extract_triples(text)
if triples:
logger.debug(f"抽取到 {len(triples)} 个三元组,插入 Neo4j")
kg = KnowledgeGraph(triples=triples, document_id=document_id)
kg.create_graphnodes()
kg.create_graphrels()
kg.export_data()
logger.info(f"文件 {file_path} 三元组成功插入 Neo4j")
else:
logger.warning(f"文件 {file_path} 未抽取到三元组")
except Exception as e:
logger.warning(f"处理三元组失败: {str(e)}, 但不影响 Milvus 插入")
return {
"status": "success",
"document_id": document_id,
"collection_name": collection_name,
"message": f"文件 {file_path} 成功嵌入并处理三元组"
}
except Exception as e:
logger.error(f"插入文档失败: {str(e)}")
return {
"status": "error",
"document_id": document_id,
"collection_name": collection_name,
"message": str(e)
}
async def _get_embeddings(self, texts: List[str]) -> List[List[float]]:
"""调用嵌入服务获取文本的向量,带缓存"""
try:
# Check the cache first; only call the service for texts not seen before
uncached_texts = [text for text in texts if text not in EMBED_CACHE]
if uncached_texts:
async with aiohttp.ClientSession() as session:
async with session.post(
"http://localhost:9998/v1/embeddings",
headers={"Content-Type": "application/json"},
json={"input": uncached_texts}
) as response:
if response.status != 200:
logger.error(f"嵌入服务调用失败,状态码: {response.status}")
raise RuntimeError(f"嵌入服务调用失败: {response.status}")
result = await response.json()
if result.get("object") != "list" or not result.get("data"):
logger.error(f"嵌入服务响应格式错误: {result}")
raise RuntimeError("嵌入服务响应格式错误")
embeddings = [item["embedding"] for item in result["data"]]
for text, embedding in zip(uncached_texts, embeddings):
EMBED_CACHE[text] = np.array(embedding) / np.linalg.norm(embedding)
logger.debug(f"成功获取 {len(embeddings)} 个新嵌入向量,缓存大小: {len(EMBED_CACHE)}")
# Return embeddings from the cache, in the order of the input texts
return [EMBED_CACHE[text] for text in texts]
except Exception as e:
logger.error(f"嵌入服务调用失败: {str(e)}")
raise RuntimeError(f"嵌入服务调用失败: {str(e)}")
async def _extract_triples(self, text: str) -> List[Dict]:
"""调用三元组抽取服务"""
try:
async with aiohttp.ClientSession() as session:
async with session.post(
"http://localhost:9991/v1/triples",
headers={"Content-Type": "application/json; charset=utf-8"},
json={"text": text}
) as response:
if response.status != 200:
logger.error(f"三元组抽取服务调用失败,状态码: {response.status}")
raise RuntimeError(f"三元组抽取服务调用失败: {response.status}")
result = await response.json()
if result.get("object") != "list" or not result.get("data"):
logger.error(f"三元组抽取服务响应格式错误: {result}")
raise RuntimeError("三元组抽取服务响应格式错误")
triples = result["data"]
logger.debug(f"成功抽取 {len(triples)} 个三元组")
return triples
except Exception as e:
logger.error(f"三元组抽取服务调用失败: {str(e)}")
raise RuntimeError(f"三元组抽取服务调用失败: {str(e)}")
async def _insert_to_milvus(self, collection_name: str, documents: List[Document], embeddings: List[List[float]]) -> None:
"""将文档和嵌入向量插入 Milvus 集合"""
try:
if not connections.has_connection("default"):
self._initialize_connection()
collection = Collection(collection_name)
collection.load()
data = {
"userid": [doc.metadata["userid"] for doc in documents],
"document_id": [doc.metadata["document_id"] for doc in documents],
"text": [doc.page_content for doc in documents],
"vector": embeddings,
"filename": [doc.metadata["filename"] for doc in documents],
"file_path": [doc.metadata["file_path"] for doc in documents],
"upload_time": [doc.metadata["upload_time"] for doc in documents],
"file_type": [doc.metadata["file_type"] for doc in documents],
}
collection.insert([data[field.name] for field in collection.schema.fields if field.name != "pk"])
collection.flush()
logger.debug(f"成功插入 {len(documents)} 个文档到集合 {collection_name}")
except Exception as e:
logger.error(f"插入 Milvus 失败: {str(e)}")
raise RuntimeError(f"插入 Milvus 失败: {str(e)}")
def _delete_document(self, db_type: str, userid: str, filename: str) -> Dict:
"""删除用户指定文件数据"""
collection_name = f"ragdb_{db_type}"
try:
if not db_type or "_" in db_type:
raise ValueError("db_type 不能为空且不能包含下划线")
if not userid or "_" in userid:
raise ValueError("userid 不能为空且不能包含下划线")
if not filename:
raise ValueError("filename 不能为空")
if len(db_type) > 100 or len(userid) > 100 or len(filename) > 255:
raise ValueError("db_type、userid 或 filename 的长度超出限制")
if not utility.has_collection(collection_name):
logger.warning(f"集合 {collection_name} 不存在")
return {
"status": "error",
"collection_name": collection_name,
"message": f"集合 {collection_name} 不存在"
}
try:
collection = Collection(collection_name)
collection.load()
logger.debug(f"加载集合: {collection_name}")
except Exception as e:
logger.error(f"加载集合 {collection_name} 失败: {str(e)}")
return {
"status": "error",
"collection_name": collection_name,
"message": f"加载集合失败: {str(e)}"
}
expr = f"userid == '{userid}' and filename == '{filename}'"
logger.debug(f"查询表达式: {expr}")
try:
results = collection.query(
expr=expr,
output_fields=["document_id"],
limit=1000
)
if not results:
logger.warning(f"没有找到 userid={userid}, filename={filename} 的记录")
return {
"status": "error",
"collection_name": collection_name,
"message": f"没有找到 userid={userid}, filename={filename} 的记录"
}
document_ids = list(set(result["document_id"] for result in results if "document_id" in result))
logger.debug(f"找到 {len(document_ids)} 个 document_id: {document_ids}")
except Exception as e:
logger.error(f"查询 document_id 失败: {str(e)}")
return {
"status": "error",
"collection_name": collection_name,
"message": f"查询失败: {str(e)}"
}
total_deleted = 0
for doc_id in document_ids:
try:
delete_expr = f"userid == '{userid}' and document_id == '{doc_id}'"
logger.debug(f"删除表达式: {delete_expr}")
delete_result = collection.delete(delete_expr)
deleted_count = delete_result.delete_count
total_deleted += deleted_count
logger.info(f"成功删除 document_id={doc_id}{deleted_count} 条记录")
except Exception as e:
logger.error(f"删除 document_id={doc_id} 失败: {str(e)}")
continue
if total_deleted == 0:
logger.warning(f"没有删除任何记录userid={userid}, filename={filename}")
return {
"status": "error",
"collection_name": collection_name,
"message": f"没有删除任何记录userid={userid}, filename={filename}"
}
logger.info(f"总计删除 {total_deleted} 条记录userid={userid}, filename={filename}")
return {
"status": "success",
"collection_name": collection_name,
"message": f"成功删除 {total_deleted} 条记录userid={userid}, filename={filename}"
}
except Exception as e:
logger.error(f"删除文档失败: {str(e)}")
return {
"status": "error",
"collection_name": collection_name,
"message": f"删除文档失败: {str(e)}"
}
async def _extract_entities(self, query: str) -> List[str]:
"""调用实体识别服务"""
try:
if not query:
raise ValueError("查询文本不能为空")
async with aiohttp.ClientSession() as session:
async with session.post(
"http://localhost:9990/v1/entities",
headers={"Content-Type": "application/json"},
json={"query": query}
) as response:
if response.status != 200:
logger.error(f"实体识别服务调用失败,状态码: {response.status}")
raise RuntimeError(f"实体识别服务调用失败: {response.status}")
result = await response.json()
if result.get("object") != "list" or not result.get("data"):
logger.error(f"实体识别服务响应格式错误: {result}")
raise RuntimeError("实体识别服务响应格式错误")
entities = result["data"]
unique_entities = list(dict.fromkeys(entities))  # deduplicate while preserving order
logger.debug(f"成功提取 {len(unique_entities)} 个唯一实体: {unique_entities}")
return unique_entities
except Exception as e:
logger.error(f"实体识别服务调用失败: {str(e)}")
return []
async def _match_triplets(self, query: str, query_entities: List[str], userid: str, document_id: str) -> List[Dict]:
"""匹配查询实体与 Neo4j 中的三元组"""
matched_triplets = []
ENTITY_SIMILARITY_THRESHOLD = 0.8
try:
graph = Graph(self.neo4j_uri, auth=(self.neo4j_user, self.neo4j_password))
logger.debug(f"已连接到 Neo4j: {self.neo4j_uri}")
matched_names = set()
for entity in query_entities:
normalized_entity = entity.lower().strip()
query = """
MATCH (n {document_id: $document_id})
WHERE toLower(n.name) CONTAINS $entity
OR apoc.text.levenshteinSimilarity(toLower(n.name), $entity) > 0.7
RETURN n.name, apoc.text.levenshteinSimilarity(toLower(n.name), $entity) AS sim
ORDER BY sim DESC
LIMIT 100
"""
try:
results = graph.run(query, document_id=document_id, entity=normalized_entity).data()
for record in results:
matched_names.add(record['n.name'])
logger.debug(f"实体 {entity} 匹配节点: {record['n.name']} (Levenshtein 相似度: {record['sim']:.2f})")
except Exception as e:
logger.warning(f"模糊匹配实体 {entity} 失败: {str(e)}")
continue
triplets = []
if matched_names:
query = """
MATCH (h {document_id: $document_id})-[r]->(t {document_id: $document_id})
WHERE h.name IN $matched_names OR t.name IN $matched_names
RETURN h.name AS head, r.name AS type, t.name AS tail
LIMIT 100
"""
try:
results = graph.run(query, document_id=document_id, matched_names=list(matched_names)).data()
seen = set()
for record in results:
head, type_, tail = record['head'], record['type'], record['tail']
triplet_key = (head.lower(), type_.lower(), tail.lower())
if triplet_key not in seen:
seen.add(triplet_key)
triplets.append({
'head': head,
'type': type_,
'tail': tail,
'head_type': '',
'tail_type': ''
})
logger.debug(f"从 Neo4j 加载三元组: document_id={document_id}, 数量={len(triplets)}")
except Exception as e:
logger.error(f"检索三元组失败: document_id={document_id}, 错误: {str(e)}")
return []
if not triplets:
logger.debug(f"文档 document_id={document_id} 无匹配三元组")
return []
texts_to_embed = query_entities + [t['head'] for t in triplets] + [t['tail'] for t in triplets]
embeddings = await self._get_embeddings(texts_to_embed)
entity_vectors = {entity: embeddings[i] for i, entity in enumerate(query_entities)}
head_vectors = {t['head']: embeddings[len(query_entities) + i] for i, t in enumerate(triplets)}
tail_vectors = {t['tail']: embeddings[len(query_entities) + len(triplets) + i] for i, t in enumerate(triplets)}
logger.debug(f"成功获取 {len(embeddings)} 个嵌入向量({len(query_entities)} entities + {len(triplets)} heads + {len(triplets)} tails")
for entity in query_entities:
entity_vec = entity_vectors[entity]
for d_triplet in triplets:
d_head_vec = head_vectors[d_triplet['head']]
d_tail_vec = tail_vectors[d_triplet['tail']]
head_similarity = 1 - cosine(entity_vec, d_head_vec)
tail_similarity = 1 - cosine(entity_vec, d_tail_vec)
if head_similarity >= ENTITY_SIMILARITY_THRESHOLD or tail_similarity >= ENTITY_SIMILARITY_THRESHOLD:
matched_triplets.append(d_triplet)
logger.debug(f"匹配三元组: {d_triplet['head']} - {d_triplet['type']} - {d_triplet['tail']} "
f"(entity={entity}, head_sim={head_similarity:.2f}, tail_sim={tail_similarity:.2f})")
unique_matched = []
seen = set()
for t in matched_triplets:
identifier = (t['head'].lower(), t['type'].lower(), t['tail'].lower())
if identifier not in seen:
seen.add(identifier)
unique_matched.append(t)
logger.info(f"找到 {len(unique_matched)} 个匹配的三元组")
return unique_matched
except Exception as e:
logger.error(f"匹配三元组失败: {str(e)}")
return []
async def _rerank_results(self, query: str, results: List[Dict]) -> List[Dict]:
"""调用重排序服务"""
try:
documents = [result["text"] for result in results]
async with aiohttp.ClientSession() as session:
async with session.post(
"http://localhost:9997/v1/rerank",
headers={"Content-Type": "application/json"},
json={
"model": "rerank-001",
"query": query,
"documents": documents,
"top_n": len(documents)
}
) as response:
if response.status != 200:
logger.error(f"重排序服务调用失败,状态码: {response.status}")
raise RuntimeError(f"重排序服务调用失败: {response.status}")
result = await response.json()
if result.get("object") != "rerank.result" or not result.get("data"):
logger.error(f"重排序服务响应格式错误: {result}")
raise RuntimeError("重排序服务响应格式错误")
rerank_data = result["data"]
reranked_results = []
for item in rerank_data:
index = item["index"]
if index < len(results):
results[index]["rerank_score"] = item["relevance_score"]
reranked_results.append(results[index])
logger.debug(f"成功重排序 {len(reranked_results)} 条结果")
return reranked_results
except Exception as e:
logger.error(f"重排序服务调用失败: {str(e)}")
return results  # on error, fall back to the original, un-reranked results
async def _fused_search(self, query: str, userid: str, db_type: str, file_paths: List[str], limit: int = 5, offset: int = 0, use_rerank: bool = True) -> List[Dict]:
"""融合搜索,基于三元组匹配和 Milvus 向量搜索"""
try:
logger.info(f"开始融合搜索: query={query}, userid={userid}, db_type={db_type}, use_rerank={use_rerank}")
collection_name = f"ragdb_{db_type}"
# Validate parameters
if not query or not userid or not db_type or not file_paths:
raise ValueError("query、userid、db_type 和 file_paths 不能为空")
if "_" in userid or "_" in db_type:
raise ValueError("userid 和 db_type 不能包含下划线")
if len(db_type) > 100 or len(userid) > 100:
raise ValueError("db_type 或 userid 的长度超出限制")
if limit < 1 or offset < 0:
raise ValueError("limit 必须大于 0offset 必须大于或等于 0")
# Check that the collection exists
if not utility.has_collection(collection_name):
logger.warning(f"集合 {collection_name} 不存在")
return []
# Load the collection
try:
collection = Collection(collection_name)
collection.load()
logger.debug(f"加载集合: {collection_name}")
except Exception as e:
logger.error(f"加载集合 {collection_name} 失败: {str(e)}")
return []
# Extract entities from the query
query_entities = await self._extract_entities(query)
logger.debug(f"提取实体: {query_entities}")
# Collect document_ids and matched triples
documents = []
all_triplets = []
for file_path in file_paths:
filename = os.path.basename(file_path)
logger.debug(f"处理文件: {filename}")
# Look up the document_id
results = collection.query(
expr=f"userid == '{userid}' and filename == '{filename}'",
output_fields=["document_id", "filename"],
limit=1
)
if not results:
logger.warning(f"未找到 userid {userid} 和 filename {filename} 对应的文档")
continue
documents.append(results[0])
# Match triples for this document
document_id = results[0]["document_id"]
matched_triplets = await self._match_triplets(query, query_entities, userid, document_id)
logger.debug(f"文件 {filename} 匹配三元组: {len(matched_triplets)}")
all_triplets.extend(matched_triplets)
if not documents:
logger.warning("未找到任何有效文档")
return []
logger.info(f"找到 {len(documents)} 个文档: {[doc['filename'] for doc in documents]}")
# Milvus vector search
search_results = []
search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}}
# If triples matched, search using each triple as the query
if all_triplets:
for triplet in all_triplets:
head = triplet.get('head', '')
type_ = triplet.get('type', '')
tail = triplet.get('tail', '')
if not head or not type_ or not tail:
logger.debug(f"无效三元组: {triplet}")
continue
triplet_text = f"{head} {type_} {tail}"
logger.debug(f"搜索三元组: {triplet_text}")
try:
embeddings = await self._get_embeddings([triplet_text])
query_vector = embeddings[0]
for doc in documents:
filename = doc["filename"]
expr = f"userid == '{userid}' and filename == '{filename}' and text like '%{head}%{tail}%'"
logger.debug(f"搜索表达式: {expr}")
results = collection.search(
data=[query_vector],
anns_field="vector",
param=search_params,
limit=limit,
expr=expr,
output_fields=["text", "userid", "document_id", "filename", "file_path", "upload_time", "file_type"],
offset=offset
)
for hits in results:
for hit in hits:
metadata = {
"userid": hit.entity.get("userid"),
"document_id": hit.entity.get("document_id"),
"filename": hit.entity.get("filename"),
"file_path": hit.entity.get("file_path"),
"upload_time": hit.entity.get("upload_time"),
"file_type": hit.entity.get("file_type")
}
result = {
"text": hit.entity.get("text"),
"distance": hit.distance,
"source": f"fused_triplets_{len(all_triplets)}",
"metadata": metadata
}
search_results.append(result)
logger.debug(f"命中: text={result['text'][:100]}..., distance={hit.distance}, filename={metadata['filename']}")
except Exception as e:
logger.warning(f"三元组 {triplet_text} 搜索失败: {str(e)}")
continue
else:
# No matched triples: fall back to searching with the original query
logger.debug("无匹配三元组,使用原始查询搜索")
embeddings = await self._get_embeddings([query])
query_vector = embeddings[0]
filenames = [os.path.basename(fp) for fp in file_paths]
expr = f"userid == '{userid}' and filename in {filenames}"
logger.debug(f"搜索表达式: {expr}")
results = collection.search(
data=[query_vector],
anns_field="vector",
param=search_params,
limit=limit,
expr=expr,
output_fields=["text", "userid", "document_id", "filename", "file_path", "upload_time", "file_type"],
offset=offset
)
for hits in results:
for hit in hits:
metadata = {
"userid": hit.entity.get("userid"),
"document_id": hit.entity.get("document_id"),
"filename": hit.entity.get("filename"),
"file_path": hit.entity.get("file_path"),
"upload_time": hit.entity.get("upload_time"),
"file_type": hit.entity.get("file_type")
}
result = {
"text": hit.entity.get("text"),
"distance": hit.distance,
"source": "fused_query",
"metadata": metadata
}
search_results.append(result)
logger.debug(f"命中: text={result['text'][:100]}..., distance={hit.distance}, filename={metadata['filename']}")
# Deduplicate by text
unique_results = []
seen_texts = set()
for result in sorted(search_results, key=lambda x: x['distance'], reverse=True):
if result['text'] not in seen_texts:
unique_results.append(result)
seen_texts.add(result['text'])
logger.info(f"去重后结果数量: {len(unique_results)} (原始数量: {len(search_results)})")
# Optional reranking
if use_rerank and unique_results:
logger.debug("开始重排序")
unique_results = await self._rerank_results(query, unique_results)
unique_results = sorted(unique_results, key=lambda x: x.get('rerank_score', 0), reverse=True)
logger.debug(f"重排序分数分布: {[round(r.get('rerank_score', 0), 3) for r in unique_results]}")
else:
# Reranking disabled: make sure rerank_score is not included
unique_results = [{k: v for k, v in r.items() if k != 'rerank_score'} for r in unique_results]
return unique_results[:limit]
except Exception as e:
logger.error(f"融合搜索失败: {str(e)}")
return []
connection_register('Milvus', MilvusConnection)
logger.info("MilvusConnection registered")

@@ -0,0 +1,71 @@
{
"filesroot": "$[workdir]$/files",
"logger": {
"name": "llmengine",
"levelname": "info",
"logfile": "$[workdir]$/logs/llmengine.log"
},
"website": {
"paths": [
["$[workdir]$/wwwroot", ""]
],
"client_max_size": 10000,
"host": "0.0.0.0",
"port": 8888,
"coding": "utf-8",
"indexes": [
"index.html",
"index.ui"
],
"startswiths": [
{
"leading": "/idfile",
"registerfunction": "idfile"
},
{
"leading": "/v1/connection",
"registerfunction": "connection"
},
{
"leading": "/v1/createcollection",
"registerfunction": "createcollection"
},
{
"leading": "/v1/deletecollection",
"registerfunction": "deletecollection"
},
{
"leading": "/v1/insertfile",
"registerfunction": "insertfile"
},
{
"leading": "/v1/deletefile",
"registerfunction": "deletefile"
},
{
"leading": "/v1/fusedsearchquery",
"registerfunction": "fusedsearchquery"
},
{
"leading": "/docs",
"registerfunction": "docs"
}
],
"processors": [
[".tmpl", "tmpl"],
[".app", "app"],
[".ui", "bui"],
[".dspy", "dspy"],
[".md", "md"]
],
"rsakey_oops": {
"privatekey": "$[workdir]$/conf/rsa_private_key.pem",
"publickey": "$[workdir]$/conf/rsa_public_key.pem"
},
"session_max_time": 3000,
"session_issue_time": 2500,
"session_redis_notuse": {
"url": "redis://127.0.0.1:6379"
}
}
}
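This appears to be the ahserver config for the test service (port 8888, with the /v1/* paths mapped to the register functions set up in connection.py's init()). Assuming it lives under <workdir>/conf and that the milvusconfig.yaml referenced by CONFIG_PATH exists, the service can be started roughly as follows; the workdir path is an assumption:

    # Equivalent of: python -m llmengine.connection -w /path/to/test/connection -p 8888 Milvus
    # ('Milvus' is the key registered by llmengine/milvus_connection.py)
    import sys
    from llmengine.connection import main

    sys.argv = ["connection", "-w", "/path/to/test/connection", "-p", "8888", "Milvus"]
    main()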

@@ -0,0 +1 @@
实体 611674ee-1d01-4f39-b0dc-bca896dce7cc

@@ -0,0 +1,209 @@
285 611674ee-1d01-4f39-b0dc-bca896dce7cc
498514 611674ee-1d01-4f39-b0dc-bca896dce7cc
Chinese knowledge graphs 611674ee-1d01-4f39-b0dc-bca896dce7cc
GY 611674ee-1d01-4f39-b0dc-bca896dce7cc
Joint Conf. on Artificial Intelligence 611674ee-1d01-4f39-b0dc-bca896dce7cc
KGE模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
Knowledge Graph Embedding Technology Research 611674ee-1d01-4f39-b0dc-bca896dce7cc
Personalized entity recommendation 611674ee-1d01-4f39-b0dc-bca896dce7cc
RJ. Relation embedding with dihedral group in knowledge graph 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransD学 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransE模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
ZH, Hovy E. An interpretable knowledge transfer model 611674ee-1d01-4f39-b0dc-bca896dce7cc
Zhu ZB 611674ee-1d01-4f39-b0dc-bca896dce7cc
architecture 611674ee-1d01-4f39-b0dc-bca896dce7cc
dimensionality reduction 611674ee-1d01-4f39-b0dc-bca896dce7cc
embedding 611674ee-1d01-4f39-b0dc-bca896dce7cc
embedding model of entities and relationships in knowledge bases 611674ee-1d01-4f39-b0dc-bca896dce7cc
embedding models for relation 611674ee-1d01-4f39-b0dc-bca896dce7cc
embeddings 611674ee-1d01-4f39-b0dc-bca896dce7cc
embeddings approach 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph completion 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph database 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph embedding 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph embedding based question answering 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph embeddings 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph link prediction 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph network 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph representation learning 611674ee-1d01-4f39-b0dc-bca896dce7cc
graph. 611674ee-1d01-4f39-b0dc-bca896dce7cc
graphs 611674ee-1d01-4f39-b0dc-bca896dce7cc
knowledge base completion 611674ee-1d01-4f39-b0dc-bca896dce7cc
∑ 611674ee-1d01-4f39-b0dc-bca896dce7cc
⊕c 611674ee-1d01-4f39-b0dc-bca896dce7cc
事实集合 611674ee-1d01-4f39-b0dc-bca896dce7cc
于戈 611674ee-1d01-4f39-b0dc-bca896dce7cc
交互嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
人 611674ee-1d01-4f39-b0dc-bca896dce7cc
人工智能 611674ee-1d01-4f39-b0dc-bca896dce7cc
优惠推荐任务 611674ee-1d01-4f39-b0dc-bca896dce7cc
会士 611674ee-1d01-4f39-b0dc-bca896dce7cc
传统模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
似然 611674ee-1d01-4f39-b0dc-bca896dce7cc
信息与动态 KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
偏差 611674ee-1d01-4f39-b0dc-bca896dce7cc
偏置向量传输多向语义 611674ee-1d01-4f39-b0dc-bca896dce7cc
元组 611674ee-1d01-4f39-b0dc-bca896dce7cc
元组关联的实体对 611674ee-1d01-4f39-b0dc-bca896dce7cc
元组学习知识 611674ee-1d01-4f39-b0dc-bca896dce7cc
全局损失函数 611674ee-1d01-4f39-b0dc-bca896dce7cc
关 611674ee-1d01-4f39-b0dc-bca896dce7cc
关系 611674ee-1d01-4f39-b0dc-bca896dce7cc
关系-尾实体对建模 611674ee-1d01-4f39-b0dc-bca896dce7cc
关系向量 611674ee-1d01-4f39-b0dc-bca896dce7cc
关系向量和时间数字 611674ee-1d01-4f39-b0dc-bca896dce7cc
关联的矩阵 611674ee-1d01-4f39-b0dc-bca896dce7cc
典 611674ee-1d01-4f39-b0dc-bca896dce7cc
动态KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
动态知识图谱嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
动态知识图谱嵌入的学习过程 611674ee-1d01-4f39-b0dc-bca896dce7cc
势超曲面 611674ee-1d01-4f39-b0dc-bca896dce7cc
单层神经网络模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
单词输入神经网络 611674ee-1d01-4f39-b0dc-bca896dce7cc
卷 611674ee-1d01-4f39-b0dc-bca896dce7cc
卷积层 611674ee-1d01-4f39-b0dc-bca896dce7cc
卷积提取特征 611674ee-1d01-4f39-b0dc-bca896dce7cc
卷积神经网络 611674ee-1d01-4f39-b0dc-bca896dce7cc
卷积神经网络模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
卷积过滤器 611674ee-1d01-4f39-b0dc-bca896dce7cc
双曲几何模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
双曲空间 611674ee-1d01-4f39-b0dc-bca896dce7cc
可感知时间间隔的动态知识图谱嵌入方法 611674ee-1d01-4f39-b0dc-bca896dce7cc
可扩展性 611674ee-1d01-4f39-b0dc-bca896dce7cc
可解释性 611674ee-1d01-4f39-b0dc-bca896dce7cc
向量化操作 611674ee-1d01-4f39-b0dc-bca896dce7cc
向量空间 611674ee-1d01-4f39-b0dc-bca896dce7cc
噪音和矛盾的问题 611674ee-1d01-4f39-b0dc-bca896dce7cc
图 7 611674ee-1d01-4f39-b0dc-bca896dce7cc
图谱嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
型将关系和实体表示 611674ee-1d01-4f39-b0dc-bca896dce7cc
基于相似性匹配的评分函数 611674ee-1d01-4f39-b0dc-bca896dce7cc
基于知识图谱嵌入的问答 611674ee-1d01-4f39-b0dc-bca896dce7cc
基于知识图谱的问答 611674ee-1d01-4f39-b0dc-bca896dce7cc
基于距离的模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
复嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
复数嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
复杂关系 611674ee-1d01-4f39-b0dc-bca896dce7cc
复杂关系建模 611674ee-1d01-4f39-b0dc-bca896dce7cc
复杂语义关联 611674ee-1d01-4f39-b0dc-bca896dce7cc
多关系知识图 611674ee-1d01-4f39-b0dc-bca896dce7cc
多层感知机 611674ee-1d01-4f39-b0dc-bca896dce7cc
多步关系路径 611674ee-1d01-4f39-b0dc-bca896dce7cc
多源信息 611674ee-1d01-4f39-b0dc-bca896dce7cc
头实体嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
孙相会 611674ee-1d01-4f39-b0dc-bca896dce7cc
定量分析 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体与关系嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体区分度 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体名称歧义性 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体嵌入向量服从正态分布 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体推荐框架 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体空间 r r Mrhi wrwti+(1,i=1,2,3) h3t1 h2 611674ee-1d01-4f39-b0dc-bca896dce7cc
实数字段 611674ee-1d01-4f39-b0dc-bca896dce7cc
对称关系 611674ee-1d01-4f39-b0dc-bca896dce7cc
嵌入三元组 611674ee-1d01-4f39-b0dc-bca896dce7cc
嵌入技术 611674ee-1d01-4f39-b0dc-bca896dce7cc
庞加莱球 611674ee-1d01-4f39-b0dc-bca896dce7cc
引文知识图 611674ee-1d01-4f39-b0dc-bca896dce7cc
张天成 611674ee-1d01-4f39-b0dc-bca896dce7cc
张量分解 611674ee-1d01-4f39-b0dc-bca896dce7cc
张量分量分解 611674ee-1d01-4f39-b0dc-bca896dce7cc
张量层数 611674ee-1d01-4f39-b0dc-bca896dce7cc
张量神经网络模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
影矩阵 611674ee-1d01-4f39-b0dc-bca896dce7cc
循环相关性 611674ee-1d01-4f39-b0dc-bca896dce7cc
态 611674ee-1d01-4f39-b0dc-bca896dce7cc
感知知识图谱嵌入方法 611674ee-1d01-4f39-b0dc-bca896dce7cc
扩展模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
投影向量 611674ee-1d01-4f39-b0dc-bca896dce7cc
投影矩阵 611674ee-1d01-4f39-b0dc-bca896dce7cc
投影矩阵堆叠 611674ee-1d01-4f39-b0dc-bca896dce7cc
挑战与展望 611674ee-1d01-4f39-b0dc-bca896dce7cc
旋转模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
旋转矩阵 611674ee-1d01-4f39-b0dc-bca896dce7cc
普通向量空间 611674ee-1d01-4f39-b0dc-bca896dce7cc
智能中的概率推理 611674ee-1d01-4f39-b0dc-bca896dce7cc
更新门 611674ee-1d01-4f39-b0dc-bca896dce7cc
树状结构 611674ee-1d01-4f39-b0dc-bca896dce7cc
模 型 611674ee-1d01-4f39-b0dc-bca896dce7cc
欧几里德范数 611674ee-1d01-4f39-b0dc-bca896dce7cc
欧拉公式 611674ee-1d01-4f39-b0dc-bca896dce7cc
欧拉角 611674ee-1d01-4f39-b0dc-bca896dce7cc
正则化项 611674ee-1d01-4f39-b0dc-bca896dce7cc
流形 611674ee-1d01-4f39-b0dc-bca896dce7cc
滤波器器 611674ee-1d01-4f39-b0dc-bca896dce7cc
田雪 611674ee-1d01-4f39-b0dc-bca896dce7cc
的知识图谱嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
相似性评分函数 611674ee-1d01-4f39-b0dc-bca896dce7cc
相关调查 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入技术 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入的应用 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识类型 611674ee-1d01-4f39-b0dc-bca896dce7cc
矩阵分解 611674ee-1d01-4f39-b0dc-bca896dce7cc
矩阵的第ij项 2 611674ee-1d01-4f39-b0dc-bca896dce7cc
神经关系提取框架 611674ee-1d01-4f39-b0dc-bca896dce7cc
神经网络 611674ee-1d01-4f39-b0dc-bca896dce7cc
神经网络模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
种基本符号 611674ee-1d01-4f39-b0dc-bca896dce7cc
种被广泛采用的知识表示方法 611674ee-1d01-4f39-b0dc-bca896dce7cc
等 611674ee-1d01-4f39-b0dc-bca896dce7cc
简单问题 611674ee-1d01-4f39-b0dc-bca896dce7cc
类 611674ee-1d01-4f39-b0dc-bca896dce7cc
类 型的关系 611674ee-1d01-4f39-b0dc-bca896dce7cc
类别 611674ee-1d01-4f39-b0dc-bca896dce7cc
类比结构 611674ee-1d01-4f39-b0dc-bca896dce7cc
级联 611674ee-1d01-4f39-b0dc-bca896dce7cc
线性方式 611674ee-1d01-4f39-b0dc-bca896dce7cc
线性模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
组 611674ee-1d01-4f39-b0dc-bca896dce7cc
结构信息 611674ee-1d01-4f39-b0dc-bca896dce7cc
结构化 611674ee-1d01-4f39-b0dc-bca896dce7cc
结构化信息 611674ee-1d01-4f39-b0dc-bca896dce7cc
结构化信息的知识表示模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
统一框架 611674ee-1d01-4f39-b0dc-bca896dce7cc
统计关系学习 611674ee-1d01-4f39-b0dc-bca896dce7cc
美国总统 611674ee-1d01-4f39-b0dc-bca896dce7cc
翻译原理 611674ee-1d01-4f39-b0dc-bca896dce7cc
翻译模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
能量函数 611674ee-1d01-4f39-b0dc-bca896dce7cc
自然语言处理 611674ee-1d01-4f39-b0dc-bca896dce7cc
融合多源信息的知识图谱嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
融合实体描述的知识表示模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
表示学习模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
认知智能 611674ee-1d01-4f39-b0dc-bca896dce7cc
训练语料库 611674ee-1d01-4f39-b0dc-bca896dce7cc
评分函数 611674ee-1d01-4f39-b0dc-bca896dce7cc
识图谱嵌入的方法 611674ee-1d01-4f39-b0dc-bca896dce7cc
识的语义表示 611674ee-1d01-4f39-b0dc-bca896dce7cc
词向量 611674ee-1d01-4f39-b0dc-bca896dce7cc
语义匹配模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
调查 611674ee-1d01-4f39-b0dc-bca896dce7cc
谱表示 611674ee-1d01-4f39-b0dc-bca896dce7cc
超平 611674ee-1d01-4f39-b0dc-bca896dce7cc
超链接 611674ee-1d01-4f39-b0dc-bca896dce7cc
距离函数 611674ee-1d01-4f39-b0dc-bca896dce7cc
软件学报 2023年 第 34卷 第 1期 611674ee-1d01-4f39-b0dc-bca896dce7cc
软件学报 2023年第 34卷 611674ee-1d01-4f39-b0dc-bca896dce7cc
软件学报 2023年第 34卷第 1期 611674ee-1d01-4f39-b0dc-bca896dce7cc
远程监督 611674ee-1d01-4f39-b0dc-bca896dce7cc
连接 611674ee-1d01-4f39-b0dc-bca896dce7cc
连接头实体 611674ee-1d01-4f39-b0dc-bca896dce7cc
链接 611674ee-1d01-4f39-b0dc-bca896dce7cc
阵W和尾实体 611674ee-1d01-4f39-b0dc-bca896dce7cc
阶Horn子句 611674ee-1d01-4f39-b0dc-bca896dce7cc
隐藏层 611674ee-1d01-4f39-b0dc-bca896dce7cc
集候选实体 611674ee-1d01-4f39-b0dc-bca896dce7cc
静态子KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
静态知识图谱 611674ee-1d01-4f39-b0dc-bca896dce7cc
非结构模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
面临的挑战 611674ee-1d01-4f39-b0dc-bca896dce7cc
项目和数据稀疏性等问题 611674ee-1d01-4f39-b0dc-bca896dce7cc
预测缺失链 接 611674ee-1d01-4f39-b0dc-bca896dce7cc
高斯词嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
高维 611674ee-1d01-4f39-b0dc-bca896dce7cc
黑盒神经模型 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,10 @@
1097 611674ee-1d01-4f39-b0dc-bca896dce7cc
2010 611674ee-1d01-4f39-b0dc-bca896dce7cc
2012 611674ee-1d01-4f39-b0dc-bca896dce7cc
2013 611674ee-1d01-4f39-b0dc-bca896dce7cc
2016 611674ee-1d01-4f39-b0dc-bca896dce7cc
2021 611674ee-1d01-4f39-b0dc-bca896dce7cc
2023 611674ee-1d01-4f39-b0dc-bca896dce7cc
2023年 611674ee-1d01-4f39-b0dc-bca896dce7cc
<time> 611674ee-1d01-4f39-b0dc-bca896dce7cc
Annu 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,8 @@
32th 611674ee-1d01-4f39-b0dc-bca896dce7cc
5α 611674ee-1d01-4f39-b0dc-bca896dce7cc
An C 611674ee-1d01-4f39-b0dc-bca896dce7cc
KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
MuRP 611674ee-1d01-4f39-b0dc-bca896dce7cc
fr(h;t);r(m;1h”;mr 611674ee-1d01-4f39-b0dc-bca896dce7cc
入相邻子KG之间的时间间隔 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入技术研究综述 279 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,6 @@
Joints 611674ee-1d01-4f39-b0dc-bca896dce7cc
V, B, 611674ee-1d01-4f39-b0dc-bca896dce7cc
W 611674ee-1d01-4f39-b0dc-bca896dce7cc
Wikipediaの学习 611674ee-1d01-4f39-b0dc-bca896dce7cc
t 611674ee-1d01-4f39-b0dc-bca896dce7cc
t-TransE 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,16 @@
1901787@stu.neu.edu.cn 611674ee-1d01-4f39-b0dc-bca896dce7cc
5 Lt4 611674ee-1d01-4f39-b0dc-bca896dce7cc
<concept> 611674ee-1d01-4f39-b0dc-bca896dce7cc
La Palma 611674ee-1d01-4f39-b0dc-bca896dce7cc
New York 611674ee-1d01-4f39-b0dc-bca896dce7cc
R-GCN[80]模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
Sun B, Han XP, Sun 611674ee-1d01-4f39-b0dc-bca896dce7cc
Sydney 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransE 611674ee-1d01-4f39-b0dc-bca896dce7cc
Vancouver 611674ee-1d01-4f39-b0dc-bca896dce7cc
Wikipedia组织的 611674ee-1d01-4f39-b0dc-bca896dce7cc
learning 611674ee-1d01-4f39-b0dc-bca896dce7cc
r(h r) 611674ee-1d01-4f39-b0dc-bca896dce7cc
欧式空间(零曲率空间) 611674ee-1d01-4f39-b0dc-bca896dce7cc
沈阳 611674ee-1d01-4f39-b0dc-bca896dce7cc
矩阵W 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,77 @@
<misc> 611674ee-1d01-4f39-b0dc-bca896dce7cc
Adcock AB 611674ee-1d01-4f39-b0dc-bca896dce7cc
AlexNet 611674ee-1d01-4f39-b0dc-bca896dce7cc
Bollacker KD 611674ee-1d01-4f39-b0dc-bca896dce7cc
CNN 611674ee-1d01-4f39-b0dc-bca896dce7cc
CP 611674ee-1d01-4f39-b0dc-bca896dce7cc
Chinese knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
ComplEx 611674ee-1d01-4f39-b0dc-bca896dce7cc
Connecting language and knowledge bases with 611674ee-1d01-4f39-b0dc-bca896dce7cc
ConvE 611674ee-1d01-4f39-b0dc-bca896dce7cc
ConvE模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
DBLP 611674ee-1d01-4f39-b0dc-bca896dce7cc
DL 611674ee-1d01-4f39-b0dc-bca896dce7cc
DY 611674ee-1d01-4f39-b0dc-bca896dce7cc
GPG 611674ee-1d01-4f39-b0dc-bca896dce7cc
GRU 611674ee-1d01-4f39-b0dc-bca896dce7cc
GRU的模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
HypER模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
IJCAI 611674ee-1d01-4f39-b0dc-bca896dce7cc
INDSCAL 611674ee-1d01-4f39-b0dc-bca896dce7cc
JD 611674ee-1d01-4f39-b0dc-bca896dce7cc
JMLR 611674ee-1d01-4f39-b0dc-bca896dce7cc
KEQA 611674ee-1d01-4f39-b0dc-bca896dce7cc
KGE 611674ee-1d01-4f39-b0dc-bca896dce7cc
KGE技术 611674ee-1d01-4f39-b0dc-bca896dce7cc
Knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
LM 611674ee-1d01-4f39-b0dc-bca896dce7cc
Le P, Dymetman M.Le P.LsTM-based mixture-of 611674ee-1d01-4f39-b0dc-bca896dce7cc
Learning entity and relation 611674ee-1d01-4f39-b0dc-bca896dce7cc
Learning sequence encoders 611674ee-1d01-4f39-b0dc-bca896dce7cc
Networks and Machine Learning 611674ee-1d01-4f39-b0dc-bca896dce7cc
QA-KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
Quaternion knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
RESCAL 611674ee-1d01-4f39-b0dc-bca896dce7cc
STransE 611674ee-1d01-4f39-b0dc-bca896dce7cc
Tensor factorization 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransE[15] 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransE在 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransE学习实体和关系 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransG模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
blocking 611674ee-1d01-4f39-b0dc-bca896dce7cc
embedding model 611674ee-1d01-4f39-b0dc-bca896dce7cc
instance of the 55th 611674ee-1d01-4f39-b0dc-bca896dce7cc
knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
knowledge graph 611674ee-1d01-4f39-b0dc-bca896dce7cc
modeling 611674ee-1d01-4f39-b0dc-bca896dce7cc
never-ending language learning 611674ee-1d01-4f39-b0dc-bca896dce7cc
probabilistic logic programming 611674ee-1d01-4f39-b0dc-bca896dce7cc
question answering 611674ee-1d01-4f39-b0dc-bca896dce7cc
relation extraction 611674ee-1d01-4f39-b0dc-bca896dce7cc
三向张量分解的新型关系学习方法 611674ee-1d01-4f39-b0dc-bca896dce7cc
使用事实进行知识图谱嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
关系 611674ee-1d01-4f39-b0dc-bca896dce7cc
关系DirectorOf 611674ee-1d01-4f39-b0dc-bca896dce7cc
动态 KGE方法 611674ee-1d01-4f39-b0dc-bca896dce7cc
区块链 611674ee-1d01-4f39-b0dc-bca896dce7cc
基于知识图谱嵌入的问答框架(KEQA 611674ee-1d01-4f39-b0dc-bca896dce7cc
多源信息学习: 随着网络技术的快速发展, 611674ee-1d01-4f39-b0dc-bca896dce7cc
大规模知识图谱中 611674ee-1d01-4f39-b0dc-bca896dce7cc
学习模型RPJE 611674ee-1d01-4f39-b0dc-bca896dce7cc
学习结 611674ee-1d01-4f39-b0dc-bca896dce7cc
对话生成 611674ee-1d01-4f39-b0dc-bca896dce7cc
对话系统 611674ee-1d01-4f39-b0dc-bca896dce7cc
对齐 611674ee-1d01-4f39-b0dc-bca896dce7cc
现有知识 611674ee-1d01-4f39-b0dc-bca896dce7cc
相似度 611674ee-1d01-4f39-b0dc-bca896dce7cc
知 识 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入技术研究综述 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入技术研究综述 283 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识库 611674ee-1d01-4f39-b0dc-bca896dce7cc
神 经网络架构 611674ee-1d01-4f39-b0dc-bca896dce7cc
结构性质学习 611674ee-1d01-4f39-b0dc-bca896dce7cc
网络 611674ee-1d01-4f39-b0dc-bca896dce7cc
软件学报 611674ee-1d01-4f39-b0dc-bca896dce7cc
静态知识 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,115 @@
4种类型 611674ee-1d01-4f39-b0dc-bca896dce7cc
<dis> 611674ee-1d01-4f39-b0dc-bca896dce7cc
Continuous Vector Space Models and Their Compositionality 611674ee-1d01-4f39-b0dc-bca896dce7cc
ConvKB 611674ee-1d01-4f39-b0dc-bca896dce7cc
CrossE 611674ee-1d01-4f39-b0dc-bca896dce7cc
Dettmers T, Minervini P, Stenetorp P 611674ee-1d01-4f39-b0dc-bca896dce7cc
GRU 611674ee-1d01-4f39-b0dc-bca896dce7cc
HypER 611674ee-1d01-4f39-b0dc-bca896dce7cc
ITransF 611674ee-1d01-4f39-b0dc-bca896dce7cc
Jinf.computer.in. 611674ee-1d01-4f39-b0dc-bca896dce7cc
KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
KG2E 611674ee-1d01-4f39-b0dc-bca896dce7cc
KGE 611674ee-1d01-4f39-b0dc-bca896dce7cc
KGE框架 611674ee-1d01-4f39-b0dc-bca896dce7cc
KG嵌 611674ee-1d01-4f39-b0dc-bca896dce7cc
KG推理 611674ee-1d01-4f39-b0dc-bca896dce7cc
KRL模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
LFM模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
M ̈obius 611674ee-1d01-4f39-b0dc-bca896dce7cc
MF 611674ee-1d01-4f39-b0dc-bca896dce7cc
MLP 611674ee-1d01-4f39-b0dc-bca896dce7cc
MuRP 611674ee-1d01-4f39-b0dc-bca896dce7cc
NAM 611674ee-1d01-4f39-b0dc-bca896dce7cc
NTN 611674ee-1d01-4f39-b0dc-bca896dce7cc
Neural knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
Poincare[88] 2017 611674ee-1d01-4f39-b0dc-bca896dce7cc
Point-Wise空间 611674ee-1d01-4f39-b0dc-bca896dce7cc
QA-KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
QQ 611674ee-1d01-4f39-b0dc-bca896dce7cc
ReLU 611674ee-1d01-4f39-b0dc-bca896dce7cc
SE模型 (h;r;t) h 611674ee-1d01-4f39-b0dc-bca896dce7cc
SLM模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
SSE模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
SSP模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
Semantic Web 611674ee-1d01-4f39-b0dc-bca896dce7cc
TDG2E 611674ee-1d01-4f39-b0dc-bca896dce7cc
TX 611674ee-1d01-4f39-b0dc-bca896dce7cc
TorusE模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
TranSparse 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransE 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransE[15] 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransE模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransG 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransG模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransMS模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
TransR模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
Xu CR 611674ee-1d01-4f39-b0dc-bca896dce7cc
entity description 611674ee-1d01-4f39-b0dc-bca896dce7cc
geometry of knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
hierarchical types 611674ee-1d01-4f39-b0dc-bca896dce7cc
instance of the 12th ACM Intl Conf. 611674ee-1d01-4f39-b0dc-bca896dce7cc
knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
knowledge graphs 611674ee-1d01-4f39-b0dc-bca896dce7cc
knowledge representation 611674ee-1d01-4f39-b0dc-bca896dce7cc
link prediction 611674ee-1d01-4f39-b0dc-bca896dce7cc
question 611674ee-1d01-4f39-b0dc-bca896dce7cc
semantic 611674ee-1d01-4f39-b0dc-bca896dce7cc
vector space 611674ee-1d01-4f39-b0dc-bca896dce7cc
三元 611674ee-1d01-4f39-b0dc-bca896dce7cc
三元组 611674ee-1d01-4f39-b0dc-bca896dce7cc
不适定 611674ee-1d01-4f39-b0dc-bca896dce7cc
人工神经网络 611674ee-1d01-4f39-b0dc-bca896dce7cc
关系模式 611674ee-1d01-4f39-b0dc-bca896dce7cc
卷积神经网络 611674ee-1d01-4f39-b0dc-bca896dce7cc
双线 611674ee-1d01-4f39-b0dc-bca896dce7cc
可解释性 611674ee-1d01-4f39-b0dc-bca896dce7cc
四元数 611674ee-1d01-4f39-b0dc-bca896dce7cc
图 8 MLP, NTN, NAM (DNN)和NAM (RMNN 611674ee-1d01-4f39-b0dc-bca896dce7cc
复杂模式 611674ee-1d01-4f39-b0dc-bca896dce7cc
多关系图嵌入的评分函数 611674ee-1d01-4f39-b0dc-bca896dce7cc
多层非线性特征学习 611674ee-1d01-4f39-b0dc-bca896dce7cc
多步推理 611674ee-1d01-4f39-b0dc-bca896dce7cc
多语 言和多模态 611674ee-1d01-4f39-b0dc-bca896dce7cc
头实体 611674ee-1d01-4f39-b0dc-bca896dce7cc
子KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
定义几 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体类别信息 611674ee-1d01-4f39-b0dc-bca896dce7cc
实体类型 611674ee-1d01-4f39-b0dc-bca896dce7cc
层次化规律 611674ee-1d01-4f39-b0dc-bca896dce7cc
张量乘法則 611674ee-1d01-4f39-b0dc-bca896dce7cc
张量分解 611674ee-1d01-4f39-b0dc-bca896dce7cc
形式的三元组 611674ee-1d01-4f39-b0dc-bca896dce7cc
投影向量 611674ee-1d01-4f39-b0dc-bca896dce7cc
拓扑结构.2 611674ee-1d01-4f39-b0dc-bca896dce7cc
文本对齐来自动标记训练实例.DS 611674ee-1d01-4f39-b0dc-bca896dce7cc
时间感知超 611674ee-1d01-4f39-b0dc-bca896dce7cc
权重矩阵 611674ee-1d01-4f39-b0dc-bca896dce7cc
流形的原理 611674ee-1d01-4f39-b0dc-bca896dce7cc
深度神经网络 611674ee-1d01-4f39-b0dc-bca896dce7cc
相似度 611674ee-1d01-4f39-b0dc-bca896dce7cc
相似性 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱三元组 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识表示学习 611674ee-1d01-4f39-b0dc-bca896dce7cc
矩阵-向量乘法 611674ee-1d01-4f39-b0dc-bca896dce7cc
神经网络模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
稀疏知识图谱 611674ee-1d01-4f39-b0dc-bca896dce7cc
空间紧致性的条件 611674ee-1d01-4f39-b0dc-bca896dce7cc
系的索引 611674ee-1d01-4f39-b0dc-bca896dce7cc
紧李群 611674ee-1d01-4f39-b0dc-bca896dce7cc
细粒度属性 611674ee-1d01-4f39-b0dc-bca896dce7cc
维度条目之间的全局关系 611674ee-1d01-4f39-b0dc-bca896dce7cc
编码模型也可以 611674ee-1d01-4f39-b0dc-bca896dce7cc
编码语义匹配 611674ee-1d01-4f39-b0dc-bca896dce7cc
评分函数 611674ee-1d01-4f39-b0dc-bca896dce7cc
语义匹配模型 611674ee-1d01-4f39-b0dc-bca896dce7cc
超网络H 611674ee-1d01-4f39-b0dc-bca896dce7cc
距离学习结构嵌入 611674ee-1d01-4f39-b0dc-bca896dce7cc
连续向量空间 611674ee-1d01-4f39-b0dc-bca896dce7cc
逻辑规则 611674ee-1d01-4f39-b0dc-bca896dce7cc
高斯空间 611674ee-1d01-4f39-b0dc-bca896dce7cc
黎曼流形 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,3 @@
2 611674ee-1d01-4f39-b0dc-bca896dce7cc
5 611674ee-1d01-4f39-b0dc-bca896dce7cc
的 概率分布的论理学 ca_XX , . 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,27 @@
Associates 611674ee-1d01-4f39-b0dc-bca896dce7cc
Associates Inc. 611674ee-1d01-4f39-b0dc-bca896dce7cc
Association for Computational Linguistics 611674ee-1d01-4f39-b0dc-bca896dce7cc
Battglia PWM 611674ee-1d01-4f39-b0dc-bca896dce7cc
CCF 611674ee-1d01-4f39-b0dc-bca896dce7cc
Chang JJ 611674ee-1d01-4f39-b0dc-bca896dce7cc
Connectivist 611674ee-1d01-4f39-b0dc-bca896dce7cc
Dai 611674ee-1d01-4f39-b0dc-bca896dce7cc
Feng等人 611674ee-1d01-4f39-b0dc-bca896dce7cc
GTK 611674ee-1d01-4f39-b0dc-bca896dce7cc
ICANN 611674ee-1d01-4f39-b0dc-bca896dce7cc
JM.LSTM 611674ee-1d01-4f39-b0dc-bca896dce7cc
Jointal 611674ee-1d01-4f39-b0dc-bca896dce7cc
KG 611674ee-1d01-4f39-b0dc-bca896dce7cc
KGE 611674ee-1d01-4f39-b0dc-bca896dce7cc
LTM 611674ee-1d01-4f39-b0dc-bca896dce7cc
PN. 611674ee-1d01-4f39-b0dc-bca896dce7cc
Sullivan 611674ee-1d01-4f39-b0dc-bca896dce7cc
Sun 611674ee-1d01-4f39-b0dc-bca896dce7cc
WW. 611674ee-1d01-4f39-b0dc-bca896dce7cc
Wikipedia组织 611674ee-1d01-4f39-b0dc-bca896dce7cc
geographies understanding 611674ee-1d01-4f39-b0dc-bca896dce7cc
relational 611674ee-1d01-4f39-b0dc-bca896dce7cc
东北大学 611674ee-1d01-4f39-b0dc-bca896dce7cc
未来方向 611674ee-1d01-4f39-b0dc-bca896dce7cc
系の 611674ee-1d01-4f39-b0dc-bca896dce7cc
表 5 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,36 @@
<org> 611674ee-1d01-4f39-b0dc-bca896dce7cc
An B 611674ee-1d01-4f39-b0dc-bca896dce7cc
Battaglia PW, Hamrick JB, Bapst V 611674ee-1d01-4f39-b0dc-bca896dce7cc
Bordes等人 611674ee-1d01-4f39-b0dc-bca896dce7cc
Chen MH 611674ee-1d01-4f39-b0dc-bca896dce7cc
Chen Z 611674ee-1d01-4f39-b0dc-bca896dce7cc
Daiber J 611674ee-1d01-4f39-b0dc-bca896dce7cc
Feng J 611674ee-1d01-4f39-b0dc-bca896dce7cc
Guo L. 611674ee-1d01-4f39-b0dc-bca896dce7cc
Guo S 611674ee-1d01-4f39-b0dc-bca896dce7cc
Ji GL 611674ee-1d01-4f39-b0dc-bca896dce7cc
Jin, 611674ee-1d01-4f39-b0dc-bca896dce7cc
Leblay J 611674ee-1d01-4f39-b0dc-bca896dce7cc
Lei K, Chen 611674ee-1d01-4f39-b0dc-bca896dce7cc
Lei等人 611674ee-1d01-4f39-b0dc-bca896dce7cc
Lin等人 611674ee-1d01-4f39-b0dc-bca896dce7cc
Mintz 611674ee-1d01-4f39-b0dc-bca896dce7cc
Niu 611674ee-1d01-4f39-b0dc-bca896dce7cc
Niu GL 611674ee-1d01-4f39-b0dc-bca896dce7cc
Springer 611674ee-1d01-4f39-b0dc-bca896dce7cc
Tang 611674ee-1d01-4f39-b0dc-bca896dce7cc
WY, Mo KX, Zhang Y, Peng XZ, Yang Q 611674ee-1d01-4f39-b0dc-bca896dce7cc
Wang Q 611674ee-1d01-4f39-b0dc-bca896dce7cc
Wang Z 611674ee-1d01-4f39-b0dc-bca896dce7cc
Yang F 611674ee-1d01-4f39-b0dc-bca896dce7cc
ZH, Li L, Xu W. CFO 611674ee-1d01-4f39-b0dc-bca896dce7cc
ZHANG Tian-Cheng1 611674ee-1d01-4f39-b0dc-bca896dce7cc
Zhang DX, Yuan B 611674ee-1d01-4f39-b0dc-bca896dce7cc
Zhang W 611674ee-1d01-4f39-b0dc-bca896dce7cc
geddy 611674ee-1d01-4f39-b0dc-bca896dce7cc
learning and Learning enth. 611674ee-1d01-4f39-b0dc-bca896dce7cc
trans 611674ee-1d01-4f39-b0dc-bca896dce7cc
上的优化目标 611674ee-1d01-4f39-b0dc-bca896dce7cc
函数定义为: 611674ee-1d01-4f39-b0dc-bca896dce7cc
张天成 611674ee-1d01-4f39-b0dc-bca896dce7cc
比尔·克林顿 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,7 @@
32d 611674ee-1d01-4f39-b0dc-bca896dce7cc
<loc> 611674ee-1d01-4f39-b0dc-bca896dce7cc
Annutal 611674ee-1d01-4f39-b0dc-bca896dce7cc
knowledgebase 611674ee-1d01-4f39-b0dc-bca896dce7cc
t 611674ee-1d01-4f39-b0dc-bca896dce7cc
第几维的 611674ee-1d01-4f39-b0dc-bca896dce7cc
词语 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,2 @@
instance of 611674ee-1d01-4f39-b0dc-bca896dce7cc
part of 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

@ -0,0 +1,17 @@
<dis> 611674ee-1d01-4f39-b0dc-bca896dce7cc
Jin J, Wan HY, Lin YF. Knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
Tay Y, Luu, Hui SC, Brauer F. Random semantic tensor ensemble for scalable knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
Text-enhanced representation learning for knowledge 611674ee-1d01-4f39-b0dc-bca896dce7cc
convolutional network 611674ee-1d01-4f39-b0dc-bca896dce7cc
distance metric learning 611674ee-1d01-4f39-b0dc-bca896dce7cc
link prediction 611674ee-1d01-4f39-b0dc-bca896dce7cc
relation attention mechanism 611674ee-1d01-4f39-b0dc-bca896dce7cc
trans encyclopedia 611674ee-1d01-4f39-b0dc-bca896dce7cc
全连接 611674ee-1d01-4f39-b0dc-bca896dce7cc
深度知识感知网络 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入技术研究综述 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入技术研究综述 293 h 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入技术研究综述 299 611674ee-1d01-4f39-b0dc-bca896dce7cc
知识图谱嵌入技术研究综述 301 611674ee-1d01-4f39-b0dc-bca896dce7cc
维度条目 611674ee-1d01-4f39-b0dc-bca896dce7cc
融合实体类别信息的知识图谱表示学习方法 611674ee-1d01-4f39-b0dc-bca896dce7cc

View File

3
test/connection/start.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Point the service at the Milvus configuration file (path is environment-specific)
export CONFIG_PATH=/share/wangmeihua/rag/conf/milvusconfig.yaml
# Launch the connection service on port 8888 with the Milvus backend, pinned to GPU 7
CUDA_VISIBLE_DEVICES=7 /share/vllm-0.8.5/bin/python -m llmengine.connection -p 8888 Milvus
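
A minimal smoke-test sketch for the service launched by start.sh, assuming it listens on localhost:8888 (the port passed via -p) and exposes the /v1/createcollection endpoint described in llmengine/connection.py; the host, port, and db_type values are deployment-specific assumptions, not fixed by this commit.

# smoke_test_connection.py -- minimal sketch; assumes the service started by start.sh
# is reachable at http://localhost:8888 and accepts the documented JSON payload.
import json
import urllib.request

def create_collection(db_type: str, base_url: str = "http://localhost:8888") -> dict:
    """POST a create-collection request and return the parsed JSON response."""
    payload = json.dumps({"db_type": db_type}).encode("utf-8")
    req = urllib.request.Request(
        f"{base_url}/v1/createcollection",
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode("utf-8"))

if __name__ == "__main__":
    # On success the service is expected to answer with {"status": "success", ...}
    print(create_collection("textdb"))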