🧠 Project Goal#
Use the transformers library to load a Hugging Face reranker model (BAAI/bge-reranker-large) and expose it as a REST API using FastAPI.
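A reranker such as BAAI/bge-reranker-large is a cross-encoder: it reads a query and a candidate document together and outputs a single relevance score for that pair. Before wiring it into a web service, here is a rough standalone sketch of that core scoring step (the query/document strings are made up for illustration):

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-reranker-large")
model = AutoModelForSequenceClassification.from_pretrained("BAAI/bge-reranker-large")
model.eval()

# Illustrative (query, document) pair; any strings work here
pairs = [["what is a reranker?", "A reranker scores how relevant a passage is to a query."]]
inputs = tokenizer(pairs, padding=True, truncation=True, max_length=512, return_tensors="pt")

with torch.no_grad():
    # One relevance logit per pair; higher means more relevant
    scores = model(**inputs, return_dict=True).logits.view(-1).float()
print(scores)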
🧪 Code Example#
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the tokenizer and the cross-encoder reranker model.
# BAAI/bge-reranker-large is a sequence-classification cross-encoder,
# so it is loaded with AutoModelForSequenceClassification rather than AutoModel.
tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-reranker-large")
model = AutoModelForSequenceClassification.from_pretrained("BAAI/bge-reranker-large")
model.eval()

# Move to GPU if available (optional)
if torch.cuda.is_available():
    model = model.to("cuda")

app = FastAPI(title="BGE Reranker API", version="1.0")

# Define the request body model
class RerankRequest(BaseModel):
    query: str
    documents: list[str]

@app.post("/rerank")
async def rerank(request: RerankRequest):
    try:
        # Build (query, document) pairs; the cross-encoder scores each pair jointly
        pairs = [[request.query, doc] for doc in request.documents]
        inputs = tokenizer(pairs, padding=True, truncation=True, max_length=512, return_tensors="pt")

        if torch.cuda.is_available():
            inputs = {k: v.to("cuda") for k, v in inputs.items()}

        with torch.no_grad():
            # The classification head yields one relevance logit per pair
            scores = model(**inputs, return_dict=True).logits.view(-1).float().tolist()

        # Sort documents by score, most relevant first
        ranked_docs = sorted(zip(request.documents, scores), key=lambda x: x[1], reverse=True)

        return {"results": [{"document": doc, "score": score} for doc, score in ranked_docs]}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=58222)
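To exercise the endpoint, start the server and post a query together with a few candidate documents. The snippet below is a minimal client sketch using the requests library (an extra dependency, not required by the API itself), assuming the server is running locally on port 58222 as configured above; the query and documents are made up for illustration:

import requests

payload = {
    "query": "What is the capital of France?",
    "documents": [
        "Paris is the capital and largest city of France.",
        "Bordeaux is known for its wine production.",
        "The Eiffel Tower is located in Paris.",
    ],
}

response = requests.post("http://localhost:58222/rerank", json=payload)
response.raise_for_status()

# Results come back sorted by score, most relevant first
for item in response.json()["results"]:
    print(f'{item["score"]:.4f}  {item["document"]}')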