Miguelpef committed
Commit 8a9797c (verified)
1 Parent(s): 52e8879

Update app.py

Files changed (1)
  1. app.py +22 -7
app.py CHANGED
@@ -1,6 +1,12 @@
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 from peft import PeftModel, PeftConfig
 import gradio as gr
+from accelerate import Accelerator
+from googletrans import Translator
+
+# Initialize the accelerator
+accelerator = Accelerator()
+translator = Translator()
 
 # Define the repository ID
 repo_id = "Miguelpef/bart-base-lora-3DPrompt" # Replace with your repository name
@@ -8,22 +14,29 @@ repo_id = "Miguelpef/bart-base-lora-3DPrompt" # Replace with your repository name
 # Load the PEFT configuration from the Hub
 peft_config = PeftConfig.from_pretrained(repo_id)
 
-# Load the base model from the Hub
-model = AutoModelForSeq2SeqLM.from_pretrained(peft_config.base_model_name_or_path)
-
-# Load the tokenizer from the Hub
-tokenizer = AutoTokenizer.from_pretrained(repo_id)
+# Load the model and tokenizer with accelerate
+model, tokenizer = accelerator.prepare(
+    AutoModelForSeq2SeqLM.from_pretrained(peft_config.base_model_name_or_path),
+    AutoTokenizer.from_pretrained(repo_id)
+)
 
 # Wrap the base model with PEFT
 model = PeftModel.from_pretrained(model, repo_id)
 
 # Now you can use the model for inference as before
-def generar_prompt_desde_objeto(objeto):
+async def generar_prompt_desde_objeto(objeto):
     prompt = objeto
     inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
     outputs = model.generate(**inputs, max_length=100)
     prompt_generado = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return prompt_generado
+    try:
+        # Try to translate the generated text
+        prompt_translate = await translator.translate(prompt_generado, src="es", dest="en")
+        return prompt_translate.text
+    except Exception as e:
+        # Handle translation errors
+        print(f"Translation error: {e}")
+        return prompt_generado
 
 # Define the Gradio interface
 iface = gr.Interface(
@@ -34,5 +47,7 @@ iface = gr.Interface(
     description="Generates 3D prompts from object descriptions using a fine-tuned BART model.",
 )
 
+
+
 # Launch the interface
 iface.launch()
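
With this change the handler becomes a coroutine, which Gradio accepts directly as an interface's `fn`. A minimal sketch for exercising the handler outside the interface, assuming a googletrans release whose `Translator.translate` is awaitable (as the `await` in the diff implies); the `main` wrapper and the sample input string are hypothetical, not part of the commit:

import asyncio

# Hypothetical smoke test: call the async handler once, outside Gradio.
async def main():
    # Spanish object description in; translated English prompt out
    # (falls back to the untranslated text if googletrans raises).
    resultado = await generar_prompt_desde_objeto("una silla de madera")
    print(resultado)

asyncio.run(main())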