SivaResearch committed
Commit 851061b · verified · 1 parent: e090a56

Update app.py

Files changed (1): app.py (+9, −43)
app.py CHANGED
@@ -1,15 +1,14 @@
-import whisper
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import gradio as gr
-# from gradio import inputs  # Import the 'inputs' module from 'gradio'
 
 
 
-Asr_model = whisper.load_model("base")
-Asr_model.device
+
+
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import gradio as gr
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
+
 model_name = "ai4bharat/Airavata"
 
 tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")
@@ -53,43 +52,10 @@ def inference(input_prompt, model, tokenizer):
     return output_text
 
 
-def transcribe(audio):
-
-    # time.sleep(3)
-    # load audio and pad/trim it to fit 30 seconds
-    audio = whisper.load_audio(audio)
-    audio = whisper.pad_or_trim(audio)
-
-    # make log-Mel spectrogram and move to the same device as the model
-    mel = whisper.log_mel_spectrogram(audio).to(model.device)
-
-    # detect the spoken language
-    _, probs = model.detect_language(mel)
-    print(f"Detected language: {max(probs, key=probs.get)}")
-
-    # decode the audio
-    options = whisper.DecodingOptions()
-    result = whisper.decode(model, mel, options)
-    return result.text
-
-
-def chat_interface(audio_tuple):
-    audio_path = audio_tuple[0] if isinstance(audio_tuple, tuple) else audio_tuple
-
-    message = transcribe(audio_path)
+def chat_interface(message, history):
     outputs = inference(message, model, tokenizer)
     return outputs
 
 
-gr.Interface(
-    title="CAMAI - Centralized Actionable Multimodal Agri Assistant on Edge Intelligence for Farmers",
-    fn=chat_interface,
-    inputs=[
-        gr.Audio(sources=["microphone"])
-    ],
-
-    outputs=[
-        "textbox"
-    ],
-    theme="darkly"
-).launch()
+chat_interface = gr.ChatInterface(chat_interface, title="CAMAI")
+chat_interface.launch()
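
For reference, the post-commit app.py reduces to the shape sketched below. This is a minimal sketch, not code from the repository: the model-loading line and the body of inference() sit in the unchanged region between the two hunks, so everything marked "assumption" is a plausible placeholder (a standard AutoModelForCausalLM load plus a tokenize → generate → decode round trip); only the signatures and the chat wiring are confirmed by the diff.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

device = "cuda" if torch.cuda.is_available() else "cpu"

model_name = "ai4bharat/Airavata"

tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")

# Assumption: the unchanged middle of the file loads the model roughly like this.
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)


def inference(input_prompt, model, tokenizer):
    # Assumption: plain tokenize -> generate -> decode; only the signature and
    # the returned output_text are visible in the diff's hunk context.
    inputs = tokenizer(input_prompt, return_tensors="pt").to(device)
    output_ids = model.generate(**inputs, max_new_tokens=256)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    output_text = tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return output_text


def chat_interface(message, history):
    # gr.ChatInterface passes (message, history); history is unused here.
    outputs = inference(message, model, tokenizer)
    return outputs


chat_interface = gr.ChatInterface(chat_interface, title="CAMAI")
chat_interface.launch()

Two observations on the change itself. First, the removed transcribe() called model.detect_language(mel) and whisper.decode(model, mel, options) against the global model (the Airavata causal LM) rather than Asr_model, so the old audio path would very likely have failed at runtime; dropping Whisper entirely sidesteps that bug along with the audio feature. Second, rebinding the name chat_interface from the handler function to the gr.ChatInterface object works but shadows the function; a distinct name such as demo would be clearer.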