from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

app = FastAPI()

# Load the fine-tuned model and tokenizer once at startup
model_name = "Bijoy09/your_mobilebert_model_repo"  # replace with your Hugging Face repo name
try:
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model.eval()  # set inference mode once here instead of on every request
except Exception as e:
    raise RuntimeError(f"Failed to load model or tokenizer: {e}")


class TextRequest(BaseModel):
    text: str


@app.post("/predict/")
async def predict(request: TextRequest):
    try:
        # Tokenize the input, padding/truncating to a fixed sequence length
        inputs = tokenizer(
            request.text,
            add_special_tokens=True,
            max_length=64,
            truncation=True,
            padding="max_length",
            return_attention_mask=True,
            return_tensors="pt",
        )
        # Run the classifier without tracking gradients
        with torch.no_grad():
            logits = model(
                inputs["input_ids"], attention_mask=inputs["attention_mask"]
            ).logits
        prediction = torch.argmax(logits, dim=1).item()
        return {"prediction": "Spam" if prediction == 1 else "Ham"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Prediction failed: {e}")


@app.get("/")
async def root():
    return {"message": "Welcome to the MobileBERT API"}
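
# A quick way to exercise the API locally (a minimal sketch; it assumes this
# file is saved as main.py and that uvicorn and requests are installed):
#
#   uvicorn main:app --reload
#
# then, from a separate shell or script:
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:8000/predict/",
#       json={"text": "Congratulations! You've won a free prize. Click here!"},
#   )
#   print(resp.json())  # e.g. {"prediction": "Spam"}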