lruizap committed on
Commit f144b12 · 1 Parent(s): 5ab8047

Update app.py

Files changed (1)
  1. app.py +17 -19
app.py CHANGED
@@ -1,30 +1,28 @@
-from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig, pipeline
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 import torch
 import gradio as gr
 
 # chatgpt-gpt4-prompts-bart-large-cnn-samsum
-tokenizer = AutoTokenizer.from_pretrained(
-    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
-model = AutoModelForSeq2SeqLM.from_pretrained(
-    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
+tokenizer = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
+model = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
 
 # zephyr
-# pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha", torch_dtype=torch.bfloat16, device_map="auto")
+pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha", torch_dtype=torch.bfloat16, device_map="auto")
 
+def useZephyr(prompt):
+    messages = [
+        {
+            "role": "system",
+            "content": "You are a friendly chatbot who always responds in the style of a pirate.",
+        },
+        {"role": "user", "content": prompt},
+    ]
+    # https://huggingface.co/docs/transformers/main/en/chat_templating
+    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    print(prompt)
 
-hf_model_id = "HuggingFaceH4/zephyr-7b-alpha"
-model = AutoModelForCausalLM.from_pretrained(hf_model_id)
-tokenizerZephyr = AutoTokenizer.from_pretrained(hf_model_id, legacy=False)
-generation_config, unused_kwargs = GenerationConfig.from_pretrained(hf_model_id, max_new_tokens=200, temperature=0.7, return_unused_kwargs=True)
-
-model.generation_config = generation_config
-
-pipe = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizerZephyr,
-)
-pipe(prompt)
+    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+    return outputs[0]["generated_text"]
 
 
 def useZephyr(prompt):
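
The hunk loads the Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum seq2seq model, but the code that actually calls it sits below the hunk. A minimal sketch of how such a prompt generator is typically driven, reusing the tokenizer and model names from the diff; the example topic and generation length are illustrative assumptions, not taken from app.py:

    # Illustrative only: feed a topic to the BART prompt generator loaded above.
    batch = tokenizer("photographer", return_tensors="pt")  # example topic (assumed)
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=150)
    print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])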
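
The rewritten useZephyr builds its prompt with the tokenizer's chat template instead of hand-formatted strings. A standalone sketch of just that step; the rendered layout in the trailing comment follows the Zephyr model card's documented template, so treat it as expected rather than verified output:

    from transformers import AutoTokenizer

    # Tokenizer only; no need to download the 7B weights to inspect the template.
    tok = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
    messages = [
        {"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate."},
        {"role": "user", "content": "Ahoy! What be a transformer?"},  # example user turn (assumed)
    ]
    # tokenize=False returns the rendered string; add_generation_prompt=True appends
    # the assistant header so the model continues speaking as the assistant.
    print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
    # Expected shape per the model card:
    # <|system|>
    # You are a friendly chatbot who always responds in the style of a pirate.</s>
    # <|user|>
    # Ahoy! What be a transformer?</s>
    # <|assistant|>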
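
app.py imports gradio, but the interface wiring also falls outside the hunk. A hypothetical sketch of the simplest way useZephyr could be exposed; gr.Interface and launch() are standard Gradio, while this particular wiring is an assumption:

    # Hypothetical wiring (not shown in this hunk): a one-function Gradio app.
    demo = gr.Interface(fn=useZephyr, inputs="text", outputs="text")
    demo.launch()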