lruizap committed
Commit 52cb1af · 1 Parent(s): f144b12

Updated Inputs

Files changed (2)
  1. HuggingFaceH4_zephyr-7b-alpha.ipynb +0 -0
  2. app.py +15 -22
HuggingFaceH4_zephyr-7b-alpha.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
app.py CHANGED
@@ -1,28 +1,17 @@
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+from transformers import pipeline
 import torch
 import gradio as gr
 
 # chatgpt-gpt4-prompts-bart-large-cnn-samsum
-tokenizer = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
-model = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
+tokenizer = AutoTokenizer.from_pretrained(
+    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
+model = AutoModelForSeq2SeqLM.from_pretrained(
+    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
 
 # zephyr
-pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha", torch_dtype=torch.bfloat16, device_map="auto")
-
-def useZephyr(prompt):
-    messages = [
-        {
-            "role": "system",
-            "content": "You are a friendly chatbot who always responds in the style of a pirate.",
-        },
-        {"role": "user", "content": prompt},
-    ]
-    # https://huggingface.co/docs/transformers/main/en/chat_templating
-    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-    print(prompt)
-
-    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
-    return outputs[0]["generated_text"]
+pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",
+                torch_dtype=torch.bfloat16, device_map="auto")
 
 
 def useZephyr(prompt):
@@ -34,8 +23,11 @@ def useZephyr(prompt):
         {"role": "user", "content": prompt},
     ]
     # https://huggingface.co/docs/transformers/main/en/chat_templating
-    outputs = pipe(prompt)
+    prompt = pipe.tokenizer.apply_chat_template(
+        messages, tokenize=False, add_generation_prompt=True)
 
+    outputs = pipe(prompt, max_new_tokens=256, do_sample=True,
+                   temperature=0.7, top_k=50, top_p=0.95)
     return outputs[0]["generated_text"]
 
 
@@ -69,7 +61,8 @@ output_component = gr.Textbox(label="Output")
 examples = [["photographer"], ["developer"], ["teacher"], [
     "human resources staff"], ["recipe for ham croquettes"]]
 description = ""
-PerfectGPT = gr.Interface(useZephyr, inputs=[input_prompt, input_maxtokens], outputs=output_component,
+
+PerfectGPT = gr.Interface(generatePrompt, inputs=[input_prompt, input_maxtokens], outputs=output_component,
                           examples=examples, title="🗿 PerfectGPT v1 🗿", description=description)
 
-PerfectGPT.launch()
+PerfectGPT.launch()
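
Note: the updated gr.Interface call is wired to a generatePrompt handler, but its definition sits outside the rendered hunks. Since the interface now has two inputs (input_prompt, input_maxtokens), the one-argument useZephyr(prompt) could no longer serve as the handler directly. The sketch below is a guess at how such a handler might chain the two models loaded in app.py — the BART checkpoint expanding a short role description into a full prompt, and Zephyr answering it — relying on the module-level tokenizer, model, and pipe defined above; the body, the 150-token cap, and the use of max_tokens are assumptions, not the commit's actual code.

def generatePrompt(prompt, max_tokens):
    # Assumed step 1: expand the short role input (e.g. "photographer") into
    # a full ChatGPT-style prompt with the BART prompt generator.
    # The 150-token cap is an arbitrary choice for this sketch.
    batch = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=150)
    generated_prompt = tokenizer.batch_decode(
        generated_ids, skip_special_tokens=True)[0]

    # Assumed step 2: hand the expanded prompt to Zephyr as the system
    # message, capping generation at the user-selected token budget.
    messages = [
        {"role": "system", "content": generated_prompt},
        {"role": "user", "content": prompt},
    ]
    templated = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(templated, max_new_tokens=int(max_tokens), do_sample=True,
                   temperature=0.7, top_k=50, top_p=0.95)
    return outputs[0]["generated_text"]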