import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from deep_translator import GoogleTranslator
from gtts import gTTS
import os
# Initialize the model and tokenizer
peft_model_id = "saludobuenas/test3"
config = PeftConfig.from_pretrained(peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True)
model.to('cpu')
model = PeftModel.from_pretrained(model, peft_model_id)
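# PeftModel.from_pretrained loads the fine-tuned adapter weights (e.g. LoRA) from
# saludobuenas/test3 on top of the frozen base model loaded above; only the small
# adapter is fetched here. Calling model.eval() afterwards is optional but avoids
# dropout during generation.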
# Your fixed prompt
fixed_prompt = "###input: Game progress: Jugador 1 played [6|6] Jugador 2 played [4|6].Board State: (6,4).Player 1 tiles: [5|6], [4|5], [0|4], [3|6], [4|4],[2|6], [1|3].Given the current board state and Player 1’s tiles, which tile should he play and why? ###output:"
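# The "###input: ... ###output:" framing presumably mirrors the template the adapter
# was fine-tuned on, so generate() is expected to continue the text after "###output:".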
def translate_to_spanish(text):
    translation = GoogleTranslator(source='auto', target='es').translate(text=text)
    return translation
def texto_a_voz(texto):
    tts = gTTS(text=texto, lang='es')
    # gTTS always writes MP3 data, so use an .mp3 extension for the temp file
    audio_file = "/tmp/temp.mp3"
    tts.save(audio_file)
    return audio_file
def process_prompt():
    # Generate a completion for the fixed prompt
    input_ids = tokenizer.encode(fixed_prompt, return_tensors='pt')
    output = model.generate(input_ids=input_ids, max_length=210)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    # Keep only the text produced after the "###output:" marker
    output_text = generated_text.split("###output:")[1].strip()
    # Translate the answer to Spanish and synthesize it as audio
    translation = translate_to_spanish(output_text)
    audio_file = texto_a_voz(translation)
    return output_text, translation, audio_file
# Since we're not using inputs, we define a function that launches the processing
def launch_response():
    return process_prompt()
# Define the Gradio interface without inputs
iface = gr.Interface(fn=launch_response,
                     inputs=[],
                     outputs=[gr.Textbox(label="Generated Response"),
                              gr.Textbox(label="Translation"),
                              gr.Audio(label="Text-to-Speech")],
                     title="Model Response Generator",
                     description="Displays the model's response, its Spanish translation, and the text-to-speech audio for the fixed prompt.",
                     examples=[])
iface.launch()
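# Note: a sketch of the packages this script relies on (likely contents of the Space's
# requirements.txt, not confirmed by the source): gradio, peft, transformers, torch,
# deep-translator, gTTS. A missing entry there is a common cause of the "Runtime error"
# status shown on the Space.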