# NOTE(review): This file was newline-mangled — three historical versions of a
# Whisper/FastAPI transcription script were collapsed onto a few physical lines.
# The collapse dropped the comment prefix in front of the fragment
# `model.generate(input_features)`, leaving a live statement that raised
# NameError at import time. Restored below as properly line-broken,
# commented-out history; the stray fragment is rejoined with its original
# commented line (`predicted_ids = model.generate(...)` in Version 2).
# The newest version (single `#`, Version 3) is truncated midway through
# read_root's html_form string — recover the rest from version control
# before reviving it; do not guess the missing markup.

# --- Version 1: transcribe a fixed local file ("output.mp3") ------------------
# #uvicorn app:app --host 0.0.0.0 --port 8000 --reload
# # from fastapi import FastAPI
# # from transformers import WhisperProcessor, WhisperForConditionalGeneration
# # import librosa
# # import uvicorn
# # app = FastAPI()
# # processor = WhisperProcessor.from_pretrained("openai/whisper-small")
# # model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# # model.config.forced_decoder_ids = None
# # audio_file_path = "output.mp3"
# # audio_data, _ = librosa.load(audio_file_path, sr=16000)
# # @app.get("/")
# # def transcribe_audio():
# #     input_features = processor(audio_data.tolist(), return_tensors="pt").input_features
# #     predicted_ids = model.generate(input_features)
# #     transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
# #     return {"transcription": transcription[0]}
# # if __name__ == "__main__":
# #     import uvicorn
# #     uvicorn.run(app, host="0.0.0.0", port=8000)
# # if __name__=='__main__':
# #     uvicorn.run('main:app', reload=True)

# --- Version 2: same, with an absolute audio path -----------------------------
# #uvicorn app:app --host 0.0.0.0 --port 8000 --reload
# #curl -X GET "http://localhost:8000/?text=I%20like%20Apples"
# #http://localhost:8000/?text=I%20like%20Apples
# # from fastapi import FastAPI
# # from transformers import WhisperProcessor, WhisperForConditionalGeneration
# # import librosa
# # import uvicorn
# # app = FastAPI()
# # # Load model and processor
# # processor = WhisperProcessor.from_pretrained("openai/whisper-small")
# # model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# # model.config.forced_decoder_ids = None
# # # Path to your audio file
# # audio_file_path = "/home/pranjal/Downloads/output.mp3"
# # # Read the audio file
# # audio_data, _ = librosa.load(audio_file_path, sr=16000)
# # @app.get("/")
# # def transcribe_audio():
# #     # Process the audio data using the Whisper processor
# #     input_features = processor(audio_data.tolist(), return_tensors="pt").input_features
# #     # Generate transcription
# #     predicted_ids = model.generate(input_features)
# #     transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
# #     return {"transcription": transcription[0]}
# # if __name__ == "__main__":
# #     import uvicorn
# #     uvicorn.run(app, host="0.0.0.0", port=8000)
# # if __name__=='__app__':
# #     uvicorn.run('main:app', reload=True)
# NOTE(review): __name__ is never '__app__', so Version 2's second guard was
# dead code even when the file was live.

# --- Version 3 (newest, TRUNCATED): upload/HTML-form based endpoint -----------
# from fastapi import FastAPI, UploadFile, File
# from transformers import WhisperProcessor, WhisperForConditionalGeneration
# import librosa
# from fastapi.responses import HTMLResponse
# import uvicorn
# import io
# app = FastAPI()
# # Load model and processor
# processor = WhisperProcessor.from_pretrained("openai/whisper-small")
# model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# model.config.forced_decoder_ids = None
# @app.get("/")
# def read_root():
#     html_form = """
# #
#
# TODO(review): the remainder of Version 3 (html_form body, the upload endpoint
# implied by UploadFile/File/io imports, and the __main__ guard) was lost in the
# mangling. Restore it from version control before uncommenting anything above.