"""Gradio entry point that mirrors the "mutisya/transcribe-api" Hugging Face Space.

Expects the environment variable "key" to hold a Hugging Face access token.
The token is re-exported as HUGGING_FACE_HUB_TOKEN so downstream hub calls
authenticate automatically, and is passed to gr.load for the Space itself.
"""
import os
import time  # noqa: F401  -- unused here; kept in case the loaded Space relies on it

import gradio as gr
import numpy as np  # noqa: F401  -- unused here; kept for parity with original file
import torch  # noqa: F401  -- unused here; kept for parity with original file
from transformers import (  # noqa: F401  -- unused here; kept for parity with original file
    M2M100ForConditionalGeneration,
    M2M100Tokenizer,
    pipeline,
)

# Hugging Face token; required to load the Space below.
auth_token = os.environ.get("key")
if auth_token is None:
    # Without this guard, os.environ[...] = None raises an opaque
    # "TypeError: str expected, not NoneType" -- fail fast with a clear message.
    raise RuntimeError('Environment variable "key" (Hugging Face token) is not set.')
os.environ["HUGGING_FACE_HUB_TOKEN"] = auth_token

# Proxy the remote Space's interface as a local Gradio app.
iface = gr.load(name="mutisya/transcribe-api", hf_token=auth_token, src="spaces")

if __name__ == "__main__":
    # Queue requests with the programmatic API closed, and hide the API docs page.
    iface.queue(api_open=False).launch(show_api=False)