from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import pipeline, set_seed
import torch
import gradio as gr
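# Two-stage flow: a BART seq2seq model expands a short role description
# (e.g. "Chef") into a full system prompt, then Zephyr-7B answers the
# user's prompt under that system prompt.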
# Prompt generator: turns a role description into a system prompt.
# The checkpoint is TensorFlow, hence from_tf=True.
tokenizer = AutoTokenizer.from_pretrained(
    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained(
    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
# Answering model: Zephyr-7B (alpha) behind a text-generation pipeline.
pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",
                torch_dtype=torch.bfloat16, device_map="auto")
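# Note: device_map="auto" requires the `accelerate` package to be installed.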
def generate(inputuno, inputdos, max_new_tokens=3556, top_p=0.95, repetition_penalty=1.0):
    top_p = float(top_p)
    prompt = inputuno
    promptdos = inputdos
    # Stage 1: expand the role into a full system prompt with the BART model.
    batch = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"])
    new_prompt = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    set_seed(42)  # reproducible sampling; `seed` is not a valid generate kwarg
    generate_kwargs = dict(
        temperature=0.7,  # fixed value; the original `fixed_temperature` was undefined
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )
    messages = [
        {"role": "system", "content": str(new_prompt)},
        {"role": "user", "content": str(promptdos)},
    ]
    # Stage 2: render the chat template, then generate. (apply_chat_template only
    # formats text; the original streaming kwargs belong to the InferenceClient API.)
    final_prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(final_prompt, return_full_text=False, **generate_kwargs)
    return outputs[0]["generated_text"]
def generatePrompt(inputuno, inputdos):
    prompt = inputuno
    promptdos = inputdos
    # Expand the role into a system prompt with the BART model.
    batch = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"])
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    new_prompt = output[0]
    messages = [
        {"role": "system", "content": str(new_prompt)},
        {"role": "user", "content": str(promptdos)},
    ]
    # https://huggingface.co/docs/transformers/main/en/chat_templating
    final_prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(final_prompt, do_sample=True)
    return outputs[0]["generated_text"]
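# Illustrative check (assumes both models are loaded; heavy to run):
# print(generatePrompt("Chef", "Recipe for ham croquettes"))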
# Interface
input_prompt = gr.Textbox(label="Act as: ", value="Chef")
input_promptdos = gr.Textbox(label="Prompt: ", value="Recipe for ham croquettes")
output_component = gr.Textbox(label="Output: ")
# Each example must supply one value per input; the paired prompts are illustrative.
examples = [["Photographer", "How do I shoot portraits at night?"],
            ["Developer", "Write a function that reverses a string"],
            ["Teacher", "Explain photosynthesis to a ten-year-old"],
            ["Human resources staff", "Draft a job posting for a junior analyst"],
            ["Chef", "Recipe for ham croquettes"]]
description = ""
PerfectGPT = gr.Interface(generate, inputs=[input_prompt, input_promptdos],
                          outputs=output_component, examples=examples,
                          title="PerfectGPT v1", description=description)
PerfectGPT.launch()
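# On a Hugging Face Space, launch() is sufficient; launch(share=True) is only
# needed for a temporary public link when running locally.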