# import gradio as gr
# gr.load("models/openai/whisper-large-v3").launch()

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from transformers import pipeline

app = FastAPI(docs_url="/api/docs")

# Permissive CORS setup: allow requests from any origin, with any method and headers.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
    allow_credentials=True,
)

# Load the Flan-T5 text-to-text generation pipeline once at startup.
pipe_flan = pipeline("text2text-generation", model="google/flan-t5-small")


@app.get("/infer_t5")
def t5(input: str):
    # Run the prompt through Flan-T5 and return the generated text.
    output = pipe_flan(input)
    return {"output": output[0]["generated_text"]}


# Serve the static front-end; with html=True the mount also serves static/index.html at "/".
app.mount("/", StaticFiles(directory="static", html=True), name="static")


@app.get("/")
def index() -> FileResponse:
    # Fallback for "/" (the static mount above is registered first and normally handles it).
    return FileResponse(path="/app/static/index.html", media_type="text/html")
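
# Example client call for the /infer_t5 endpoint (a minimal sketch, assuming the app
# is served locally on port 7860, the default for Hugging Face Spaces; adjust the
# host and port for your deployment):
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:7860/infer_t5",
#       params={"input": "Translate English to German: Hello, world!"},
#   )
#   print(resp.json()["output"])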