File size: 1,348 Bytes
ed0bdc4 cc6362f 7811033 cc6362f ed0bdc4 ae29a25 ed0bdc4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
from fastapi import FastAPI, Form, HTTPException, Request, File
from fastapi.concurrency import asynccontextmanager
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from typing import Annotated
from wtpsplit import SaT
# Registry of loaded sentence-splitting models, keyed by model name.
# Populated on startup and emptied on shutdown by `lifespan`.
sat_models = {}
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: load the SaT model on startup, free it on shutdown."""
    model_name = "sat-3l-sm"
    sat_models[model_name] = SaT(model_name)
    yield
    # Drop every loaded model so its resources can be reclaimed.
    sat_models.clear()
# Application instance; `lifespan` loads/unloads the SaT model around the app's lifetime.
app = FastAPI(lifespan=lifespan)
# Jinja2 renderer for HTML responses; templates live in ./templates.
templates = Jinja2Templates(directory="templates")
@app.get("/", response_class=HTMLResponse)
def root(request: Request):
    """Serve the landing page (empty form, no split result yet)."""
    response = templates.TemplateResponse(request=request, name="index.html")
    return response
@app.post("/split", response_class=HTMLResponse)
async def split_text(request: Request, text: Annotated[str, Form()] = ""):
    """Split form-submitted text into sentences and render them back into the page."""
    model = sat_models["sat-3l-sm"]
    context = {"sentences": model.split(text)}
    return templates.TemplateResponse(request=request, name="index.html", context=context)
@app.post("/api/split")
async def split_file(file: Annotated[bytes, File()]):
    """Split an uploaded UTF-8 text file into sentences.

    Returns a JSON object {"sentences": [...]}.

    Raises:
        HTTPException 413: if the upload exceeds the size limit.
        HTTPException 400: if the upload is not valid UTF-8 text.
    """
    if len(file) > 1.44 * 1024 * 1024:  # 1.44 MB upload cap
        raise HTTPException(status_code=413, detail="File too large")
    try:
        text = file.decode("utf-8")
    except UnicodeDecodeError:
        # Without this, a binary upload would surface as an unhandled 500.
        raise HTTPException(status_code=400, detail="File is not valid UTF-8 text")
    sentences = sat_models["sat-3l-sm"].split(text)
    return {"sentences": sentences}