from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from peft import PeftModel, PeftConfig
import gradio as gr
from accelerate import Accelerator
from googletrans import Translator
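# Accelerator handles device placement for the model; Translator is the
# googletrans client used to translate the generated prompt into English.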
accelerator = Accelerator()
translator = Translator()
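# LoRA adapter repository on the Hugging Face Hub.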
repo_id = "Miguelpef/bart-base-lora-3DPrompt"
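# The adapter config records which base model the LoRA weights were trained on.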
peft_config = PeftConfig.from_pretrained(repo_id)
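# Load the base seq2seq model named in the adapter config and the tokenizer
# shipped with the adapter repo; accelerator.prepare() moves the model to the
# detected device and passes the tokenizer through unchanged.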
model, tokenizer = accelerator.prepare(
    AutoModelForSeq2SeqLM.from_pretrained(peft_config.base_model_name_or_path),
    AutoTokenizer.from_pretrained(repo_id),
)
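# Attach the LoRA adapter weights to the base model.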
model = PeftModel.from_pretrained(model, repo_id)
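# Gradio callback: generate a Spanish 3D prompt from the object description,
# then try to translate it to English, falling back to the Spanish text if
# the translation service fails.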
async def generar_prompt_desde_objeto(objeto):
    prompt = objeto
    inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
    outputs = model.generate(**inputs, max_length=100)
    prompt_generado = tokenizer.decode(outputs[0], skip_special_tokens=True)
    try:
        prompt_translate = await translator.translate(prompt_generado, src="es", dest="en")
        return prompt_translate.text
    except Exception as e:
        print(f"Translation error: {e}")
        return prompt_generado
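# Wire the callback into a simple Gradio interface (text in, text out).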
iface = gr.Interface(
    fn=generar_prompt_desde_objeto,
    inputs=gr.Textbox(lines=2, placeholder="Enter object description here..."),
    outputs="text",
    title="Generador de Prompt 3D",
    description="""Genera un prompt para crear una imagen del modelo 3D desde la descripción de un objeto, usando el modelo facebook/bart fine-tuned.
ATENCIÓN: Funciona solamente en el idioma Castellano.
ATTENTION: It only works in the Spanish language.""",
)
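# Start the Gradio app.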
iface.launch()