import whisper
import gradio as gr
from transformers import pipeline
# Force the models to run on CPU
device = "cpu"
print("Running on CPU")

# Load the tiny Whisper model on the chosen device
whisper_model = whisper.load_model("tiny", device=device)

# Load the text summarization model from Hugging Face
# (allenai/led-base-16384 is an alternative that accepts much longer inputs)
#summarizer = pipeline("summarization", model="allenai/led-base-16384")
summarizer = pipeline(task="summarization", model="facebook/bart-large-cnn", device=-1)  # device=-1 forces CPU
# Function to transcribe and summarize the audio file
def transcribe_and_summarize(audio):
    # Step 1: Transcribe the audio using Whisper
    transcription_result = whisper_model.transcribe(audio)
    transcription = transcription_result['text']
    # Step 2: Summarize the transcription (truncation guards against inputs
    # longer than the model's maximum input length)
    summary = summarizer(transcription, min_length=50, max_length=200, truncation=True)
    summary_text = summary[0]['summary_text']
    return transcription, summary_text
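
# --- Hedged sketch (not part of the original Space): facebook/bart-large-cnn can
# only attend to roughly 1024 input tokens, so very long transcripts get truncated
# above. One possible workaround is to summarize the transcript in chunks and join
# the partial summaries; `summarize_long_text` is a hypothetical helper.
def summarize_long_text(text, chunk_chars=3000):
    # Split the transcript into character chunks (a rough proxy for token count)
    chunks = [text[i:i + chunk_chars] for i in range(0, len(text), chunk_chars)]
    # Summarize each chunk independently, then join the partial summaries
    partial = summarizer(chunks, min_length=30, max_length=120, truncation=True)
    return " ".join(p['summary_text'] for p in partial)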
# Create the Gradio interface
demo = gr.Interface(
    fn=transcribe_and_summarize,  # Function called for transcription and summarization
    inputs=gr.Audio(type="filepath", label="Upload your audio file"),  # Input audio field
    outputs=[gr.Textbox(label="Transcription"), gr.Textbox(label="Summary")],  # Output fields
    title="Whisper Tiny Transcription and Summarization",
    examples=["Classification_and_Regression_in_Machine_Learning.mp3"],
    description="Upload an audio file to get the transcription from the Whisper tiny model and a summary generated with Hugging Face's facebook/bart-large-cnn.",
)
# Launch the Gradio interface
demo.launch(debug=True)
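
# Usage note (assumption: the file is saved as app.py): running `python app.py`
# starts the Gradio server, which serves the UI at http://127.0.0.1:7860 by default.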