# 3d-prompt/app.py
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from peft import PeftModel, PeftConfig
import gradio as gr
from accelerate import Accelerator
from googletrans import Translator
# Initialize the accelerator and the translator
accelerator = Accelerator()
translator = Translator()
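# Note: the `await translator.translate(...)` call below assumes a
# googletrans release whose translate() is a coroutine (recent 4.x);
# with the older synchronous 3.x API the await would fail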
# Repository ID of the fine-tuned LoRA adapter on the Hugging Face Hub
repo_id = "Miguelpef/bart-base-lora-3DPrompt"
# Load the PEFT configuration from the Hub
peft_config = PeftConfig.from_pretrained(repo_id)
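# The PEFT config records which base checkpoint the adapter was trained
# on, exposed as peft_config.base_model_name_or_path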
# Load the tokenizer and the base model, then wrap the base model with
# the LoRA adapter weights from the Hub (accelerator.prepare only acts
# on models/optimizers/dataloaders, so the tokenizer is loaded directly)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
base_model = AutoModelForSeq2SeqLM.from_pretrained(peft_config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, repo_id)

# Let accelerate place the assembled model on the available device
model = accelerator.prepare(model)
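# Optional: eval mode disables dropout for deterministic inference
model.eval()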
# Inference: generate a 3D prompt from an object description and
# translate the result from Spanish to English
async def generar_prompt_desde_objeto(objeto):
    # Tokenize the input and generate with the LoRA-adapted model
    inputs = tokenizer(objeto, return_tensors='pt').to(model.device)
    outputs = model.generate(**inputs, max_length=100)
    prompt_generado = tokenizer.decode(outputs[0], skip_special_tokens=True)
    try:
        # Try to translate the generated text to English
        prompt_translate = await translator.translate(prompt_generado, src="es", dest="en")
        return prompt_translate.text
    except Exception as e:
        # Translation failed; fall back to the untranslated prompt
        print(f"Translation error: {e}")
        return prompt_generado
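# Standalone usage sketch (hypothetical input, outside Gradio):
#   import asyncio
#   print(asyncio.run(generar_prompt_desde_objeto("una silla de madera")))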
# Define the Gradio interface
iface = gr.Interface(
    fn=generar_prompt_desde_objeto,
    inputs=gr.Textbox(lines=2, placeholder="Enter object description here..."),
    outputs="text",
    title="3D Prompt Generator",
    description="Generates 3D prompts from object descriptions using a fine-tuned BART model.",
)
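# Gradio supports async handlers natively, so the coroutine above is
# awaited by the framework on each request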
# Launch the interface
iface.launch()