CordwainerSmith harish3110 committed on
Commit d8755a6
0 Parent(s)

Duplicate from harish3110/emotion_detection


Co-authored-by: Harish Vadlamani <[email protected]>

Files changed (4)
  1. .gitattributes +27 -0
  2. README.md +13 -0
  3. app.py +32 -0
  4. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Emotion Detection
+ emoji: 🌖
+ colorFrom: blue
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 3.0.19
+ app_file: app.py
+ pinned: false
+ duplicated_from: harish3110/emotion_detection
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,32 @@
+ from speechbrain.pretrained.interfaces import foreign_class
+ import gradio as gr
+
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ # Loading the speechbrain emotion detection model
+ learner = foreign_class(
+     source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
+     # savedir="/home/harish3110/SeaWord/emotion/nbs/pretrained_models/CustomEncoderWav2vec2Classifier--8353113631630090076",
+     pymodule_file="custom_interface.py",
+     classname="CustomEncoderWav2vec2Classifier"
+ )
+
+ # Building prediction function for gradio
+ emotion_dict = {
+     'sad': 'Sad',
+     'hap': 'Happy',
+     'ang': 'Anger',
+     'neu': 'Neutral'
+ }
+
+ def predict_emotion(audio):
+     out_prob, score, index, text_lab = learner.classify_file(audio.name)
+     return emotion_dict[text_lab[0]]
+
+ # Loading gradio interface
+ inputs = gr.inputs.Audio(label="Input Audio", type="file")
+ outputs = "text"
+ title = "Emotion Detection"
+ description = "Gradio demo for Emotion Detection. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below."
+ gr.Interface(predict_emotion, inputs, outputs, title=title, description=description).launch()
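
As a quick sanity check outside of Gradio, the same classifier that app.py loads can be called directly on an audio file. The sketch below re-uses the foreign_class loader from app.py; the path "sample.wav" is a hypothetical local recording, not a file in this commit.

# Minimal local sketch: same loader as app.py, run on a local WAV file.
# "sample.wav" is a hypothetical placeholder path, not part of this repo.
from speechbrain.pretrained.interfaces import foreign_class

learner = foreign_class(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    pymodule_file="custom_interface.py",
    classname="CustomEncoderWav2vec2Classifier",
)

out_prob, score, index, text_lab = learner.classify_file("sample.wav")
print(text_lab[0])  # expected to be one of 'sad', 'hap', 'ang', 'neu'

classify_file returns the same (out_prob, score, index, text_lab) tuple that predict_emotion unpacks, so text_lab[0] is the key that app.py looks up in emotion_dict.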
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ speechbrain
+ torchaudio
+ transformers
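
Note that gradio itself is not listed here: the Space declares sdk: gradio with sdk_version: 3.0.19 in README.md, so the Spaces runtime is expected to provide it. Running app.py locally would presumably also require installing a compatible gradio release alongside these packages.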