from fastapi import FastAPI, UploadFile, File
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

app = FastAPI()

# Use the GPU with float16 when available; otherwise fall back to CPU/float32.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model_id = "openai/whisper-large-v3"

model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
model.to(device)

processor = AutoProcessor.from_pretrained(model_id)

# Long-form transcription: audio is split into 30-second chunks and
# processed in batches of 16, with timestamps included in the output.
pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    chunk_length_s=30,
    batch_size=16,
    return_timestamps=True,
    torch_dtype=torch_dtype,
    device=device,
)


@app.post("/speech_to_text")
async def speech_to_text(file: UploadFile = File(...)):
    if file:
        # Persist the upload to disk so the pipeline can read it by path.
        contents = await file.read()
        with open(file.filename, "wb") as f:
            f.write(contents)
        converted_result = pipe(file.filename)
        return {"status": 200, "text": converted_result["text"]}
    else:
        return {"status": -1}
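
# Usage sketch (an assumption, not part of the original script): with the
# server running locally, e.g. via `uvicorn main:app --port 8000`, the
# endpoint can be exercised with the `requests` library. The file name
# "sample.wav" and the port are placeholders.
#
#   import requests
#
#   with open("sample.wav", "rb") as audio:
#       resp = requests.post(
#           "http://localhost:8000/speech_to_text",
#           files={"file": audio},
#       )
#   print(resp.json()["text"])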