Aekanun committed on
Commit 2eb0277 · 1 Parent(s): 526e16c
Files changed (1)
  1. app.py +26 -9
app.py CHANGED
@@ -1,30 +1,47 @@
-import spaces
 import gradio as gr
 from transformers import pipeline
+import torch
+import spaces
 
-# Initialize model and move to GPU
+# Initialize model on CPU
 model = pipeline(
     "automatic-speech-recognition",
     model="Aekanun/whisper-small-hi",
-    device="cuda"  # just set it to cuda
+    device="cpu"
 )
 
-@spaces.GPU  # GPU function with default 60s duration
+@spaces.GPU
 def transcribe_speech(audio):
-    """Speech transcription function"""
+    """Speech transcription with GPU support"""
     try:
         if audio is None:
             return "กรุณาบันทึกเสียงก่อน"
 
-        # Process audio (model is already on GPU)
-        result = model(audio, batch_size=1)
+        # Move model to GPU
+        model.model = model.model.to("cuda")
 
-        # Get text result
-        text = result["text"] if isinstance(result, dict) else result
+        with torch.amp.autocast('cuda'):
+            # Process audio with chunk length to handle long audio
+            result = model(
+                audio,
+                batch_size=1,
+                return_timestamps=True,  # fixes the timestamps error
+                chunk_length_s=30  # fixes the audio-length error
+            )
+
+        # Get text result
+        text = result["text"] if isinstance(result, dict) else result
+
+        # Move model back to CPU
+        model.model = model.model.to("cpu")
+        torch.cuda.empty_cache()
 
         return text
 
     except Exception as e:
+        # Make sure model is back on CPU in case of error
+        model.model = model.model.to("cpu")
+        torch.cuda.empty_cache()
         return f"เกิดข้อผิดพลาด: {str(e)}"
 
 # Create Gradio interface
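The hunk ends at the "# Create Gradio interface" comment, so the interface code itself is not part of this diff. Below is a minimal sketch of how transcribe_speech could be wired into a Gradio app, assuming Gradio 4.x (gr.Audio with sources=[...]) and an audio input passed as a file path; the component choices and launch options in the actual app.py may differ.

import gradio as gr

# Hypothetical wiring, not taken from this commit's hunk:
# recorded or uploaded audio is handed to transcribe_speech as a file path
demo = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()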