student-abdullah committed on
Commit
8dbcc5b
1 Parent(s): 0375083

Upload 5 files

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. a.py +166 -0
  3. location.csv +0 -0
  4. requirements.txt +0 -0
  5. train.csv +0 -0
  6. unsloth.Q5_K_M.gguf +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ unsloth.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
a.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+ from fuzzywuzzy import process
4
+ from langchain_community.llms import LlamaCpp
5
+ from langchain_core.callbacks import StreamingStdOutCallbackHandler
6
+ from langchain_core.prompts import PromptTemplate
7
+
8
# Load the CSV files into DataFrames.
# - location.csv: Kendra locations, looked up later by 'Pin' and read for
#   'Address'/'Email' columns; Windows-1252 encoding is required for this file.
# - train.csv: medicine Q&A pairs with 'Question' and 'Context' columns,
#   used by find_best_match() for fuzzy retrieval.
df = pd.read_csv('location.csv', encoding='Windows-1252')
df2 = pd.read_csv('train.csv')

# Initialize the LlamaCpp model from a local GGUF checkpoint.
llm = LlamaCpp(
    model_path="unsloth.Q5_K_M.gguf",
    temperature=0.01,  # near-greedy decoding for deterministic answers
    max_tokens=500,    # matches truncate_at_full_stop's default max_length
    # NOTE(review): top_p is normally a nucleus-sampling probability in (0, 1];
    # 3 presumably disables nucleus filtering entirely — confirm intent.
    top_p=3,
    # Streams generated tokens to stdout as they are produced (server console,
    # not the Streamlit page).
    callbacks=[StreamingStdOutCallbackHandler()],
    verbose=False,
    # "###" prefixes every section header in the template below, so generation
    # stops before the model can start a new "### ..." section.
    stop=["###"]
)

# Define the Alpaca-style prompt template. handle_input() fills 'instruction'
# and 'input' and leaves 'response' empty so the model completes it.
template = """Below is an instruction that describes a task, paired with an input that provides further context. Write a lengthy detailed response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input}

### Response:
{response}"""

prompt = PromptTemplate.from_template(template)
36
+
37
+
38
def find_best_match(query, min_score=0):
    """Return the Context whose Question best fuzzy-matches *query*.

    Parameters
    ----------
    query : str
        Free-text user input to match against df2['Question'].
    min_score : int, optional
        Minimum fuzzywuzzy similarity score (0-100) required to accept the
        best match. The default of 0 preserves the original behaviour of
        always returning the closest question, however weak the match.

    Returns
    -------
    str
        The Context of the best-matching row, or a fallback message when
        there is no match clearing *min_score*.
    """
    questions = df2['Question'].tolist()
    contexts = df2['Context'].tolist()

    # extractOne returns a (best_question, score) tuple, or None when the
    # choices list is empty.
    best_match = process.extractOne(query, questions)
    if best_match and best_match[1] >= min_score:
        # NOTE(review): list.index returns the FIRST occurrence, so duplicate
        # questions in train.csv always resolve to their first context.
        index = questions.index(best_match[0])
        return contexts[index]
    return "No relevant information found."
49
+
50
+
51
def truncate_at_full_stop(text, max_length=500):
    """Trim *text* to at most *max_length* characters, ending at a sentence.

    If *text* already fits, it is returned unchanged. Otherwise it is cut at
    *max_length* and then shortened further to the last '.' inside the cut,
    so the result ends on a complete sentence when possible.

    Parameters
    ----------
    text : str
        The text to truncate (here, the raw LLM response).
    max_length : int, optional
        Hard upper bound on the returned length. Defaults to 500, matching
        the model's max_tokens setting.

    Returns
    -------
    str
        The (possibly truncated) text. If no '.' occurs within the first
        *max_length* characters, the hard cut is returned as-is.
    """
    if len(text) <= max_length:
        return text

    truncated = text[:max_length]

    # Prefer ending on the last full stop inside the window; fall back to the
    # hard cut when the window contains no '.' at all.
    # (Leftover debug print() calls were removed — they polluted the same
    # stdout stream the LLM callback writes to.)
    last_period = truncated.rfind('.')
    if last_period != -1:
        return truncated[:last_period + 1]

    return truncated
66
+
67
+
68
# Seed st.session_state on the first run: landing page, both conversation
# logs, and the text-input buffer bound to the Assistant tab's widget.
# Existing values survive Streamlit's top-to-bottom reruns untouched.
_state_defaults = {
    'selected_service': "Home",  # which page the main area renders
    'chat_history': [],          # Medicine Services conversation log
    'history': [],               # Assistant (LLM) conversation log
    'input': '',                 # Assistant text_input widget value
}
for _key, _default in _state_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
77
+
78
# Sidebar navigation: one button per service. A click stores the chosen
# service name in session state so the selection persists across reruns.
with st.sidebar:
    st.title("Select the Service")

    for _service in ('Medicine Services', 'Kendra Locator', 'Assistant'):
        if st.button(_service):
            st.session_state.selected_service = _service
91
+
92
# Main content area: render exactly one page per rerun, chosen by the
# service stored in session state by the sidebar buttons.
if st.session_state.selected_service == "Home":
    st.title("Welcome to Medical Service Center")
    st.write("Explore the options in the sidebar to get started.")

elif st.session_state.selected_service == "Medicine Services":
    st.title("Medicine Services")

    # Display the accumulated chat history (rendered before input handling,
    # so a message appended below only becomes visible on the next rerun).
    for chat in st.session_state.chat_history:
        st.write(f"**User:** {chat['user']}")
        st.write(f"**Bot:** {chat['bot']}")

    # User input section
    user_input = st.text_input("Enter medicine:")

    # On "Send", fuzzy-match the query against train.csv and record the
    # question/answer pair; empty input is silently ignored.
    if st.button('Send'):
        if user_input:
            response = find_best_match(user_input)
            st.session_state.chat_history.append({"user": user_input, "bot": response})

elif st.session_state.selected_service == "Kendra Locator":
    st.title("Kendra Locator")
    display_option = st.selectbox("Select:", ["Address", "Email"])
    pin_code_input = st.text_input("Enter Pin Code:")

    if st.button("Locate"):
        if pin_code_input:
            # Compare pins as strings so typed input matches regardless of
            # whether the CSV column was parsed as int or str.
            result = df[df['Pin'].astype(str) == pin_code_input]
            if not result.empty:
                # Show only the first matching row's chosen field.
                if display_option == "Address":
                    st.write(f"Address: {result['Address'].values[0]}")
                elif display_option == "Email":
                    st.write(f"Email: {result['Email'].values[0]}")
            else:
                st.write("No results found.")
        else:
            st.write("Please enter a pin code.")

elif st.session_state.selected_service == "Assistant":
    st.title("Query Assistance")

    # Display the AI chat history (again, a turn appended by the callback
    # below appears here on the rerun that the callback triggers).
    for chat in st.session_state.history:
        st.write(f"**Medicine Query:** {chat['user']}")
        st.write(f"**Chatbot:** {chat['bot']}")

    # on_change callback for the text_input below: runs before the script
    # reruns, so it may safely rewrite st.session_state['input'].
    def handle_input():
        user_input = st.session_state['input']
        if user_input:
            # Fill the Alpaca-style template; 'response' stays empty so the
            # model generates the completion.
            formatted_prompt = prompt.format(
                instruction="You are an all-knowing Medical AI. Provide detailed responses to only medicine-related queries.",
                input=user_input,
                response=""  # Leave this blank for generation!
            )

            # Generate response (blocking call; tokens also stream to stdout
            # via the StreamingStdOutCallbackHandler configured on llm).
            response = llm.invoke(formatted_prompt)

            # Truncate response if necessary so it ends on a full sentence.
            truncated_response = truncate_at_full_stop(response)

            # Update the chat history rendered above.
            st.session_state.history.append({"user": user_input, "bot": truncated_response})

            # Clear the input box for the next query.
            st.session_state['input'] = ''


    # Persistent text input; key="input" binds its value to
    # st.session_state['input'], which handle_input reads and clears.
    st.text_input("Enter Medicine Name:", key="input", on_change=handle_input)
location.csv ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
Binary file (2.86 kB). View file
 
train.csv ADDED
The diff for this file is too large to render. See raw diff
 
unsloth.Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56ec872ac6e16e492c06073affb524dbc121d4b0d3d906edbbfe219231b1bfc9
3
+ size 5732987264