lruizap committed on
Commit
4c288e9
·
1 Parent(s): 9e35d46

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -9,12 +9,12 @@ tokenizer = AutoTokenizer.from_pretrained(
9
  model = AutoModelForSeq2SeqLM.from_pretrained(
10
  "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
11
 
12
- # zephyr
13
- pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",
14
- torch_dtype=torch.bfloat16, device_map="auto")
15
-
16
 
17
  def generatePrompt(inputuno, inputdos):
 
 
 
 
18
  prompt = inputuno
19
  promptdos = inputdos
20
  batch = tokenizer(prompt, return_tensors="pt")
@@ -31,9 +31,9 @@ def generatePrompt(inputuno, inputdos):
31
  },
32
  ]
33
  # https://huggingface.co/docs/transformers/main/en/chat_templating
34
- # final_prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
35
 
36
- outputs = pipe(messages, tokenize=False, add_generation_prompt=True)
37
 
38
  return outputs[0]["generated_text"]
39
  #
 
9
  model = AutoModelForSeq2SeqLM.from_pretrained(
10
  "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
11
 
 
 
 
 
12
 
13
  def generatePrompt(inputuno, inputdos):
14
+
15
+ # zephyr
16
+ pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",torch_dtype=torch.bfloat16, device_map="auto")
17
+
18
  prompt = inputuno
19
  promptdos = inputdos
20
  batch = tokenizer(prompt, return_tensors="pt")
 
31
  },
32
  ]
33
  # https://huggingface.co/docs/transformers/main/en/chat_templating
34
+ final_prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
35
 
36
+ outputs = final_prompt
37
 
38
  return outputs[0]["generated_text"]
39
  #