# PerfectGPT / app.py
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import torch
import gradio as gr
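
# Two-stage flow: a BART seq2seq model first expands a short role ("Chef")
# into a full system prompt; Zephyr-7B then answers the user's question
# while following that system prompt.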
# Stage 1: model that expands a role into a ChatGPT-style system prompt
tokenizer = AutoTokenizer.from_pretrained(
    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained(
    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
# Stage 2: Zephyr chat model that answers the user's prompt
pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",
                torch_dtype=torch.bfloat16, device_map="auto")
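# Note: device_map="auto" requires the accelerate package; a 7B model in
# bfloat16 needs roughly 14 GB of accelerator memory (rough estimate; layers
# that do not fit are offloaded, which is slower).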


def generatePrompt(inputuno, inputdos):
    # Stage 1: expand the role into a full system prompt
    batch = tokenizer(inputuno, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"])
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    new_prompt = output[0]

    messages = [
        {"role": "system", "content": str(new_prompt)},
        {"role": "user", "content": str(inputdos)},
    ]

    # https://huggingface.co/docs/transformers/main/en/chat_templating
    # tokenize=False and add_generation_prompt are arguments of
    # apply_chat_template, not of the pipeline call, so render the chat
    # template first and then generate from the resulting string.
    final_prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True)
    # max_new_tokens is an illustrative cap, not part of the original code
    outputs = pipe(final_prompt, max_new_tokens=256)
    return outputs[0]["generated_text"]
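
# For example, generatePrompt("Chef", "Recipe for ham croquettes") builds a
# chef system prompt with BART and returns Zephyr's answer (output varies
# between runs).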

# Interface
input_prompt = gr.Textbox(label="Act as: ", value="Chef")
input_promptdos = gr.Textbox(label="Prompt: ", value="Recipe for ham croquettes")
output_component = gr.Textbox(label="Output: ")
# With two inputs, each example row must supply a value for both fields;
# the prompt pairings below are illustrative.
examples = [["photographer", "Tips for shooting portraits"],
            ["developer", "Write a function that reverses a string"],
            ["teacher", "Explain photosynthesis to a 10-year-old"],
            ["human resources staff", "Draft a job posting for a junior analyst"],
            ["Chef", "Recipe for ham croquettes"]]
description = ""
PerfectGPT = gr.Interface(generatePrompt,
                          inputs=[input_prompt, input_promptdos],
                          outputs=output_component,
                          examples=examples,
                          title="🗿 PerfectGPT v1 🗿",
                          description=description)
PerfectGPT.launch()