Spaces:
Runtime error
Runtime error
Upload 2 files
Browse files- app (1).py +50 -0
- requirements (1).txt +12 -0
app (1).py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Imports --------------------------------------------------------------
import os

import gradio as gr
from deep_translator import GoogleTranslator
from gtts import gTTS
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# --- Model initialisation -------------------------------------------------
# The adapter config records which base model the PEFT weights were trained
# on; pull that base model, pin it to CPU, then attach the adapter on top.
peft_model_id = "saludobuenas/test3"
config = PeftConfig.from_pretrained(peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path, return_dict=True
)
model.to("cpu")
model = PeftModel.from_pretrained(model, peft_model_id)

# Fixed prompt fed to the model on every request — the UI takes no input.
fixed_prompt = "###input: Game progress: Jugador 1 played [6|6] Jugador 2 played [4|6].Board State: (6,4).Player 1 tiles: [5|6], [4|5], [0|4], [3|6], [4|4],[2|6], [1|3].Given the current board state and Player 1’s tiles, which tile should he play and why? ###output:"
def translate_to_spanish(text):
    """Translate *text* into Spanish using the Google Translate backend.

    The source language is auto-detected by the service.
    """
    translator = GoogleTranslator(source="auto", target="es")
    return translator.translate(text=text)
def texto_a_voz(texto):
    """Synthesize Spanish speech for *texto* and return the audio file path.

    gTTS produces MP3 data, so the temporary file gets a ``.mp3`` suffix.
    The original hard-coded ``/tmp/temp.wav`` both mislabelled the format
    and was clobbered when two requests ran concurrently; a unique temp
    file avoids both problems. The returned path is fed to ``gr.Audio``.
    """
    import tempfile  # stdlib; local import keeps this fix self-contained

    tts = gTTS(text=texto, lang='es')
    # delete=False: the file must survive the context so gradio can read it.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fh:
        audio_file = fh.name
    tts.save(audio_file)
    return audio_file
def process_prompt():
    """Run the fixed prompt through the model and post-process the result.

    Returns:
        tuple: (generated English text, its Spanish translation,
        path to the text-to-speech audio file).
    """
    input_ids = tokenizer.encode(fixed_prompt, return_tensors='pt')
    output = model.generate(input_ids=input_ids, max_length=210)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    # partition() never raises: if the model fails to echo the "###output:"
    # marker, fall back to the whole decoded text. The original
    # split(...)[1] crashed with IndexError in that case.
    _, marker, tail = generated_text.partition("###output:")
    output_text = (tail if marker else generated_text).strip()
    translation = translate_to_spanish(output_text)
    audio_file = texto_a_voz(translation)
    return output_text, translation, audio_file
|
38 |
+
# Since we're not using inputs, we define a function that launches the processing
|
39 |
+
def launch_response():
|
40 |
+
return process_prompt()
|
41 |
+
|
# --- Gradio UI ------------------------------------------------------------
# No input widgets: the app always answers the one fixed domino prompt.
output_widgets = [
    gr.Textbox(label="Generated Response"),
    gr.Textbox(label="Translation"),
    gr.Audio(label="Text-to-Speech"),
]
iface = gr.Interface(
    fn=launch_response,
    inputs=[],
    outputs=output_widgets,
    title="Model Response Generator",
    description="Displays the model's response, its Spanish translation, and the text-to-speech audio for the fixed prompt.",
    examples=[],
).launch()
requirements (1).txt
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
streamlit
gradio
peft
# transformers and torch are imported/required directly by app.py;
# their absence here is the likely cause of the Space's runtime error.
transformers
torch
bitsandbytes
sentencepiece
httpx
httpcore
pymongo
deep-translator
IPython
gTTS
pygame