anezatra2 committed
Commit 540b86f · verified · 1 Parent(s): dabca7f

Update app.py

Files changed (1)
  1. app.py +74 -76
app.py CHANGED
@@ -1,78 +1,76 @@
- from peft import AutoPeftModelForCausalLM
- from transformers import GenerationConfig
- from transformers import AutoTokenizer
- import torch
- import streamlit as st
- from streamlit_chat import message
-
- st.session_state.clicked = True
-
- def process_data_sample(example):
-     processed_example = (
-         "<|system|>\n You are a support chatbot who helps with user queries and always responds in the style of a professional.</s>\n"
-         "<|user|>\n" + example + "</s>\n<|assistant|>\n"
-     )
-     return processed_example
-
- @st.cache_resource(show_spinner=True)
- def create_bot():
-     tokenizer = AutoTokenizer.from_pretrained("Vasanth/zephyr-support-chatbot")
-     model = AutoPeftModelForCausalLM.from_pretrained(
-         "Vasanth/zephyr-support-chatbot",
-         low_cpu_mem_usage=True,
-         return_dict=True,
-         torch_dtype=torch.float16,
-         device_map="cuda",
-     )
-     generation_config = GenerationConfig(
-         do_sample=True,
-         temperature=0.5,
-         max_new_tokens=256,
-         pad_token_id=tokenizer.eos_token_id,
-     )
-     return model, tokenizer, generation_config
-
- model, tokenizer, generation_config = create_bot()
-
- def infer_bot(prompt):
-     inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
-     outputs = model.generate(**inputs, generation_config=generation_config)
-     out_str = tokenizer.decode(outputs[0], skip_special_tokens=True).replace(prompt, "")
-     return out_str
-
- def display_conversation(history):
-     for i in range(len(history["assistant"])):
-         message(history["user"][i], is_user=True, key=str(i) + "_user")
-         message(history["assistant"][i], key=str(i))
-
- def main():
-     st.title("Support Member 📚🤖")
-     st.subheader("A bot built on Zephyr, fine-tuned to act as a support member")
-
-     user_input = st.text_input("Enter your query")
-
-     if "assistant" not in st.session_state:
-         st.session_state["assistant"] = ["I am ready to help you"]
-     if "user" not in st.session_state:
-         st.session_state["user"] = ["Hey there!"]
-
-     if st.session_state.clicked:
-         if st.button("Answer"):
-             answer = infer_bot(user_input)
-             st.session_state["user"].append(user_input)
-             st.session_state["assistant"].append(answer)
-
-             if st.session_state["assistant"]:
-                 display_conversation(st.session_state)
-
  if __name__ == "__main__":
-     main()
 
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+
+ client = InferenceClient(model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+
+ def respond(message, history, system_message):
+     # Build the message list, starting with the system message
+     messages = [{"role": "system", "content": system_message}]
+
+     for user_msg, assistant_msg in history:
+         if user_msg:
+             messages.append({"role": "user", "content": user_msg})
+         if assistant_msg:
+             messages.append({"role": "assistant", "content": assistant_msg})
+
+     # Append the latest user message
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+     try:
+         # Request a completion and hand it to the chat window
+         result = client.chat_completion(
+             messages=messages,
+             max_tokens=250,
+             temperature=0.7,
+             top_p=0.95,
+         )
+         for choice in result.choices:
+             response += choice.message.content or ""
+             yield response
+     except Exception as e:
+         # On failure, surface the error message to the user
+         yield f"Error: {e}"
+
+ # Build the Gradio interface
+ with gr.Blocks(theme=gr.Theme.from_hub('HaleyCH/HaleyCH_Theme')) as demo:
+     # Note: this banner's HTML is also passed to the model as the system prompt (see bot_response)
+     system_message = gr.HTML("""
+     <h1 style="color: #fff; text-shadow: 0 0 5px #fff, 0 0 10px #fff, 0 0 15px #fff, 0 0 10px #0000ff, 0 0 15px #0000ff; text-align: center;">
+         SIMULACRA GPT-2
+     </h1>
+     <p>🤖 Welcome to Simulacra, user! See our account for more information.</p>
+     """)
+
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox(label="Type your message")
+
+     # Put the buttons side by side in a single row
+     with gr.Row():
+         clear = gr.Button("Clear")
+         submit = gr.Button("Send")
+
+     def user_input(user_message, history):
+         # Prefix the message with an inline icon via HTML
+         user_message_with_image = f'<img src="file_path/favicon.ico" alt="icon" style="width: 16px; height: 16px; vertical-align: middle;"> {user_message}'
+         return "", history + [[user_message_with_image, None]]
+
+     def bot_response(history):
+         last_message = history[-1][0]
+         response_gen = respond(
+             message=last_message,
+             history=history[:-1],
+             system_message=system_message.value,
+         )
+         for response in response_gen:
+             history[-1][1] = response
+             yield history
+
+     msg.submit(user_input, [msg, chatbot], [msg, chatbot], queue=False).then(
+         bot_response, chatbot, chatbot
+     )
+     clear.click(lambda: None, None, chatbot, queue=False)
+     # Wire the Send button to the same chain as submitting the textbox
+     submit.click(user_input, [msg, chatbot], [msg, chatbot], queue=False).then(
+         bot_response, chatbot, chatbot
+     )
+
+ # Launch the app
  if __name__ == "__main__":
+     demo.launch(share=True)
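
Note: respond() is written as a generator, but without streaming the chat_completion call returns the whole reply in a single chunk, so the chat window updates only once. If token-by-token output is wanted, huggingface_hub's InferenceClient supports stream=True; a minimal sketch, assuming the same client and sampling parameters (respond_streaming is an illustrative name, not part of this commit):

    from huggingface_hub import InferenceClient

    client = InferenceClient(model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")

    def respond_streaming(messages):
        # stream=True yields ChatCompletionStreamOutput chunks as tokens arrive
        response = ""
        for chunk in client.chat_completion(
            messages=messages,
            max_tokens=250,
            temperature=0.7,
            top_p=0.95,
            stream=True,
        ):
            response += chunk.choices[0].delta.content or ""
            yield response  # emit the growing partial reply for incremental display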
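Note also that bot_response() passes system_message.value, i.e. the banner's raw HTML, to the model as its system prompt, so the model receives markup and CSS instead of plain instructions. One way to keep the banner purely presentational is a separate plain-text prompt; a minimal sketch reusing respond() from above (SYSTEM_PROMPT and its wording are illustrative, not part of this commit):

    SYSTEM_PROMPT = "You are a helpful support assistant."  # illustrative wording

    def bot_response(history):
        last_message = history[-1][0]
        # Same flow as in the commit, but with a plain-text system prompt
        for response in respond(
            message=last_message,
            history=history[:-1],
            system_message=SYSTEM_PROMPT,
        ):
            history[-1][1] = response
            yield history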