from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the reranker and its tokenizer. bge-reranker-large is a cross-encoder:
# it scores (query, document) pairs directly rather than producing standalone
# embeddings, so it must be loaded as a sequence-classification model, not
# with AutoModel, and its scores must not be computed via cosine similarity.
tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-reranker-large")
model = AutoModelForSequenceClassification.from_pretrained("BAAI/bge-reranker-large")
model.eval()

# Move the model to the GPU if one is available (optional)
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

app = FastAPI(title="BGE Reranker API", version="1.0")

# Request body schema
class RerankRequest(BaseModel):
    query: str
    documents: list[str]

@app.post("/rerank")
async def rerank(request: RerankRequest):
    try:
        # Build (query, document) pairs and encode them as a single batch
        pairs = [[request.query, doc] for doc in request.documents]
        inputs = tokenizer(
            pairs, padding=True, truncation=True,
            max_length=512, return_tensors="pt"
        ).to(device)
        with torch.no_grad():
            # The model emits one logit per pair; higher means more relevant
            scores = model(**inputs, return_dict=True).logits.view(-1).float().tolist()
        # Sort documents by relevance score, most relevant first
        ranked_docs = sorted(
            zip(request.documents, scores),
            key=lambda x: x[1],
            reverse=True
        )
        return {"results": [{"document": doc, "score": score} for doc, score in ranked_docs]}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=58222)
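To check the service end to end, you can post a query and a few candidate documents to the endpoint. A minimal sketch, assuming the server above is running locally on port 58222 and the `requests` package is installed (the query and documents here are illustrative placeholders):

import requests

# Smoke test for the /rerank endpoint defined above;
# assumes the service is reachable at localhost:58222.
resp = requests.post(
    "http://localhost:58222/rerank",
    json={
        "query": "What is the capital of France?",
        "documents": [
            "Paris is the capital and largest city of France.",
            "The Eiffel Tower is a landmark in Paris.",
            "Berlin is the capital of Germany.",
        ],
    },
    timeout=30,
)
resp.raise_for_status()
# Results come back already sorted by descending relevance score
for item in resp.json()["results"]:
    print(f"{item['score']:.4f}  {item['document']}")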