Spaces:
Runtime error
Runtime error
File size: 1,645 Bytes
6364e0a c4efef6 6364e0a ca9429e 6364e0a ca9429e 89c2a39 ca9429e 710f69d ca9429e 7c0ecb8 89c2a39 f3fab95 06e6e70 ca9429e 89c2a39 ca9429e 89c2a39 ca9429e 89c2a39 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import gradio as gr
from transformers import pipeline, set_seed
#Using the local model
#model="./models/mt5-small-finetuned-amazon-en-es"
#summarizer = pipeline("summarization", model)
#Using the default model
#summarizer = pipeline("summarization")
#Using the fine tuned model hosted in hf
#hub_model_id = "vhpvmx/mt5-small-finetuned-amazon-en-es"
#response = pipeline("summarization", model=hub_model_id)
#def resp(text):
#summarize
# return response(text)[0]["summary_text"]
#hub_model_id = "WizardLM/WizardLM-7B-V1.0"
#response = pipeline("text2text-generation", model=hub_model_id)
#Got this error: it could not find the model
#OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like WizardLM/WizardLM-7B-V1.0 is not the path to a directory containing a file named config.json.
#Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.
#hub_model_id = "tiiuae/falcon-7b-instruct"
#response = pipeline("text-generation", model=hub_model_id)
#got this error
#runtime error
#Memory limit exceeded (16Gi)
#got this error - after doing the hardware upgrade
#runtime error
#Memory limit exceeded (32Gi)
# Load the GPT-2 text-generation pipeline (small enough to fit in the
# Space's memory limits, unlike the 7B models tried above).
response = pipeline('text-generation', model='gpt2')
# Fix the sampling seed so generations are reproducible across runs.
set_seed(42)
def resp(text):
    """Generate a GPT-2 continuation of *text* and return it as a string.

    The text-generation pipeline returns a list of dicts shaped like
    ``[{"generated_text": "..."}]``; returning that list directly would
    render its repr in the Gradio output Textbox, so extract the string.
    """
    return response(text)[0]["generated_text"]
# Minimal UI: a free-text prompt in, the generated continuation out.
with gr.Blocks() as demo:
    prompt_box = gr.Textbox(placeholder="Ingresa un texto...", lines=4)
    result_box = gr.Textbox(label="Respuesta")
    generate_btn = gr.Button("Genera la respuesta")
    # Wire the button: on click, run resp(prompt) and show the result.
    generate_btn.click(resp, prompt_box, result_box)

demo.launch()
|