tevykuch committed
Commit effb8bc · verified · 1 Parent(s): 809fde8

Update app.py

Files changed (1)
  1. app.py +18 -12
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 sys_message = """
 This model can generate untruths, lies or inappropriate things. Only for testing and validation.
@@ -13,22 +13,28 @@ generation_config = {
     "top_p": 0.95,
     "top_k": 30,
     "repetition_penalty": 1.1,
-    "stop_token": '### Instruction:'
+    "eos_token_id": pipe.tokenizer.eos_token_id
 }
 
-tokenizer = AutoTokenizer.from_pretrained("tevykuch/sftsl0th")
-llm = AutoModelForCausalLM.from_pretrained("tevykuch/sftsl0th")
 
-def stream(prompt):
-    # Tokenize the prompt
-    inputs = tokenizer.encode(prompt, return_tensors="pt")
-    # Generate a response
-    output_ids = llm.generate(inputs, **generation_config)
-    # Decode the generated ids to a string
-    response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-    return response
+pipe = pipeline("text-generation", model="tevykuch/sftsl0th", device=0, framework="pt")
+
+# tokenizer = AutoTokenizer.from_pretrained("tevykuch/sftsl0th")
+# llm = AutoModelForCausalLM.from_pretrained("tevykuch/sftsl0th")
 
+# def stream(prompt):
+#     # Tokenize the prompt
+#     inputs = tokenizer.encode(prompt, return_tensors="pt")
+#     # Generate a response
+#     output_ids = llm.generate(inputs, **generation_config)
+#     # Decode the generated ids to a string
+#     response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+#     return response
 
+def stream(prompt):
+    outputs = pipe(prompt, **generation_config)
+    response = outputs[0]["generated_text"]
+    return response
 
 chat_interface = gr.ChatInterface(
     fn=stream,
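
The commit swaps the manual tokenize / generate / decode path for a transformers text-generation pipeline and replaces the invalid "stop_token" entry with "eos_token_id". One caveat visible in the new hunk: generation_config (new lines 13-17) reads pipe.tokenizer.eos_token_id, but pipe is only assigned at new line 20, so the module would raise a NameError on import unless the pipeline is created earlier in the file. The following is a minimal sketch under that assumption, with the pipeline built before the config; keys of generation_config that fall outside the hunk are omitted, do_sample is an added assumption (top_p/top_k only take effect when sampling is enabled), and the extra history parameter reflects that gr.ChatInterface passes the running chat history to fn as a second argument.

import gradio as gr
from transformers import pipeline

# Build the pipeline first so generation_config can safely reference its tokenizer.
# device=0 assumes a CUDA GPU is available; framework="pt" matches the committed call.
pipe = pipeline("text-generation", model="tevykuch/sftsl0th", device=0, framework="pt")

generation_config = {
    "do_sample": True,  # assumption: required for the sampling parameters below to apply
    "top_p": 0.95,
    "top_k": 30,
    "repetition_penalty": 1.1,
    "eos_token_id": pipe.tokenizer.eos_token_id,  # stop at the model's end-of-sequence token
}

def stream(prompt, history=None):
    # The pipeline tokenizes, generates, and decodes in one call;
    # generated_text contains the prompt followed by the completion.
    outputs = pipe(prompt, **generation_config)
    return outputs[0]["generated_text"]

chat_interface = gr.ChatInterface(fn=stream)

if __name__ == "__main__":
    chat_interface.launch()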