ProfessorLeVesseur committed
Update app.py
app.py
CHANGED
@@ -1,713 +1,3 @@
-# # intervention_analysis_app.py
-
-# import streamlit as st
-# import pandas as pd
-# # from transformers import pipeline
-# from huggingface_hub import InferenceClient
-# import os
-# from pathlib import Path
-# from dotenv import load_dotenv
-
-# load_dotenv()
-
-# # Set the Hugging Face API key
-# # Retrieve Hugging Face API key from environment variables
-# hf_api_key = os.getenv('HF_API_KEY')
-# if not hf_api_key:
-# raise ValueError("HF_API_KEY not set in environment variables")
-
-# # Create the Hugging Face inference client
-# client = InferenceClient(api_key=hf_api_key)
-
-# # Constants
-# INTERVENTION_COLUMN = 'Did the intervention happen today?'
-# ENGAGED_STR = 'Engaged (Respect, Responsibility, Effort)'
-# PARTIALLY_ENGAGED_STR = 'Partially Engaged (about 50%)'
-# NOT_ENGAGED_STR = 'Not Engaged (less than 50%)'
-
-# def main():
-# st.title("Intervention Program Analysis")
-
-# # File uploader
-# uploaded_file = st.file_uploader("Upload your Excel file", type=["xlsx"])
-
-# if uploaded_file is not None:
-# try:
-# # Read the Excel file into a DataFrame
-# df = pd.read_excel(uploaded_file)
-# st.subheader("Uploaded Data")
-# st.write(df)
-
-# # Ensure expected column is available
-# if INTERVENTION_COLUMN not in df.columns:
-# st.error(f"Expected column '{INTERVENTION_COLUMN}' not found.")
-# return
-
-# # Clean up column names
-# df.columns = df.columns.str.strip()
-
-# # Compute Intervention Session Statistics
-# intervention_stats = compute_intervention_statistics(df)
-# st.subheader("Intervention Session Statistics")
-# st.write(intervention_stats)
-
-# # Compute Student Metrics
-# student_metrics_df = compute_student_metrics(df)
-# st.subheader("Student Metrics")
-# st.write(student_metrics_df)
-
-# # Prepare input for the language model
-# llm_input = prepare_llm_input(student_metrics_df)
-
-# # Generate Notes and Recommendations using Hugging Face LLM
-# recommendations = prompt_response_from_hf_llm(llm_input)
-
-# st.subheader("AI Analysis")
-# st.markdown(recommendations)
-
-# except Exception as e:
-# st.error(f"Error reading the file: {str(e)}")
-
-# def compute_intervention_statistics(df):
-# # Total Number of Days Available
-# total_days = len(df)
-
-# # Intervention Sessions Held
-# sessions_held = df[INTERVENTION_COLUMN].str.strip().str.lower().eq('yes').sum()
-
-# # Intervention Sessions Not Held
-# sessions_not_held = df[INTERVENTION_COLUMN].str.strip().str.lower().eq('no').sum()
-
-# # Intervention Frequency (%)
-# intervention_frequency = (sessions_held / total_days) * 100 if total_days > 0 else 0
-# intervention_frequency = round(intervention_frequency, 2)
-
-# # Create a DataFrame to display the statistics
-# stats = {
-# 'Total Number of Days Available': [total_days],
-# 'Intervention Sessions Held': [sessions_held],
-# 'Intervention Sessions Not Held': [sessions_not_held],
-# 'Intervention Frequency (%)': [intervention_frequency]
-# }
-# stats_df = pd.DataFrame(stats)
-# return stats_df
-
-# def compute_student_metrics(df):
-# # Filter DataFrame for sessions where intervention happened
-# intervention_df = df[df[INTERVENTION_COLUMN].str.strip().str.lower() == 'yes']
-# intervention_sessions_held = len(intervention_df)
-
-# # Get list of student columns
-# student_columns = [col for col in df.columns if col.startswith('Student Attendance')]
-
-# student_metrics = {}
-
-# for col in student_columns:
-# student_name = col.replace('Student Attendance [', '').replace(']', '').strip()
-# # Get the attendance data for the student
-# student_data = intervention_df[[col]].copy()
-
-# # Treat blank entries as 'Absent'
-# student_data[col] = student_data[col].fillna('Absent')
-
-# # Assign attendance values
-# attendance_values = student_data[col].apply(lambda x: 1 if x in [
-# ENGAGED_STR,
-# PARTIALLY_ENGAGED_STR,
-# NOT_ENGAGED_STR
-# ] else 0)
-
-# # Number of Sessions Attended
-# sessions_attended = attendance_values.sum()
-
-# # Attendance (%)
-# attendance_pct = (sessions_attended / intervention_sessions_held) * 100 if intervention_sessions_held > 0 else 0
-# attendance_pct = round(attendance_pct, 2)
-
-# # For engagement calculation, include only sessions where attendance is not 'Absent'
-# valid_engagement_indices = attendance_values[attendance_values == 1].index
-# engagement_data = student_data.loc[valid_engagement_indices, col]
-
-# # Assign engagement values
-# engagement_values = engagement_data.apply(lambda x: 1 if x == ENGAGED_STR
-# else 0.5 if x == PARTIALLY_ENGAGED_STR else 0)
-
-# # Sum of Engagement Values
-# sum_engagement_values = engagement_values.sum()
-
-# # Number of Sessions Attended for engagement (should be same as sessions_attended)
-# number_sessions_attended = len(valid_engagement_indices)
-
-# # Engagement (%)
-# engagement_pct = (sum_engagement_values / number_sessions_attended) * 100 if number_sessions_attended > 0 else 0
-# engagement_pct = round(engagement_pct, 2)
-
-# # Store metrics
-# student_metrics[student_name] = {
-# 'Attendance (%)': attendance_pct,
-# 'Engagement (%)': engagement_pct
-# }
-
-# # Create a DataFrame from student_metrics
-# student_metrics_df = pd.DataFrame.from_dict(student_metrics, orient='index').reset_index()
-# student_metrics_df.rename(columns={'index': 'Student'}, inplace=True)
-# return student_metrics_df
-
-# def prepare_llm_input(student_metrics_df):
-# # Convert the student metrics DataFrame to a string
-# metrics_str = student_metrics_df.to_string(index=False)
-# llm_input = f"""
-# Based on the following student metrics:
-
-# {metrics_str}
-
-# Provide:
-
-# 1. Notes and Key Takeaways: Summarize the data, highlight students with the lowest and highest attendance and engagement percentages, identify students who may need adjustments to their intervention due to low attendance or engagement, and highlight students who are showing strong performance.
-
-# 2. Recommendations and Next Steps: Provide interpretations based on the analysis and suggest possible next steps or strategies to improve student outcomes.
-# """
-# return llm_input
-
-# def prompt_response_from_hf_llm(llm_input):
-# # Generate the refined prompt using Hugging Face API
-# response = client.chat.completions.create(
-# # model="mistralai/Mistral-7B-Instruct-v0.3",
-# model="meta-llama/Llama-3.1-70B-Instruct",
-# messages=[
-# {"role": "user", "content": llm_input}
-# ],
-# stream=True,
-# temperature=0.5,
-# max_tokens=1024,
-# top_p=0.7
-# )
-
-# # Combine messages if response is streamed
-# response_content = ""
-# for message in response:
-# response_content += message.choices[0].delta.content
-
-# return response_content.strip()
-
-# if __name__ == '__main__':
-# main()
-
-
-# CHARTS
-# # intervention_analysis_app.py
-
-# import streamlit as st
-# import pandas as pd
-# import matplotlib.pyplot as plt
-# # from transformers import pipeline
-# from huggingface_hub import InferenceClient
-# import os
-# from pathlib import Path
-# from dotenv import load_dotenv
-
-# load_dotenv()
-
-# # Set the Hugging Face API key
-# # Retrieve Hugging Face API key from environment variables
-# hf_api_key = os.getenv('HF_API_KEY')
-# if not hf_api_key:
-# raise ValueError("HF_API_KEY not set in environment variables")
-
-# # Create the Hugging Face inference client
-# client = InferenceClient(api_key=hf_api_key)
-
-# # Constants
-# INTERVENTION_COLUMN = 'Did the intervention happen today?'
-# ENGAGED_STR = 'Engaged (Respect, Responsibility, Effort)'
-# PARTIALLY_ENGAGED_STR = 'Partially Engaged (about 50%)'
-# NOT_ENGAGED_STR = 'Not Engaged (less than 50%)'
-
-# def main():
-# st.title("Intervention Program Analysis")
-
-# # File uploader
-# uploaded_file = st.file_uploader("Upload your Excel file", type=["xlsx"])
-
-# if uploaded_file is not None:
-# try:
-# # Read the Excel file into a DataFrame
-# df = pd.read_excel(uploaded_file)
-# st.subheader("Uploaded Data")
-# # st.write(df.head(4)) # Display only the first four rows
-# st.write(df) # Display all
-
-# # Ensure expected column is available
-# if INTERVENTION_COLUMN not in df.columns:
-# st.error(f"Expected column '{INTERVENTION_COLUMN}' not found.")
-# return
-
-# # Clean up column names
-# df.columns = df.columns.str.strip()
-
-# # Compute Intervention Session Statistics
-# intervention_stats = compute_intervention_statistics(df)
-# st.subheader("Intervention Session Statistics")
-# st.write(intervention_stats)
-
-# # Visualization for Intervention Session Statistics
-# plot_intervention_statistics(intervention_stats)
-
-# # Compute Student Metrics
-# student_metrics_df = compute_student_metrics(df)
-# st.subheader("Student Metrics")
-# st.write(student_metrics_df)
-
-# # Visualization for Student Metrics
-# plot_student_metrics(student_metrics_df)
-
-# # Prepare input for the language model
-# llm_input = prepare_llm_input(student_metrics_df)
-
-# # Generate Notes and Recommendations using Hugging Face LLM
-# recommendations = prompt_response_from_hf_llm(llm_input)
-
-# st.subheader("AI Analysis")
-# st.markdown(recommendations)
-
-# except Exception as e:
-# st.error(f"Error reading the file: {str(e)}")
-
-# def compute_intervention_statistics(df):
-# # Total Number of Days Available
-# total_days = len(df)
-
-# # Intervention Sessions Held
-# sessions_held = df[INTERVENTION_COLUMN].str.strip().str.lower().eq('yes').sum()
-
-# # Intervention Sessions Not Held
-# sessions_not_held = df[INTERVENTION_COLUMN].str.strip().str.lower().eq('no').sum()
-
-# # Intervention Frequency (%)
-# intervention_frequency = (sessions_held / total_days) * 100 if total_days > 0 else 0
-# intervention_frequency = round(intervention_frequency, 2)
-
-# # Create a DataFrame to display the statistics
-# stats = {
-# 'Total Number of Days Available': [total_days],
-# 'Intervention Sessions Held': [sessions_held],
-# 'Intervention Sessions Not Held': [sessions_not_held],
-# 'Intervention Frequency (%)': [intervention_frequency]
-# }
-# stats_df = pd.DataFrame(stats)
-# return stats_df
-
-# def plot_intervention_statistics(intervention_stats):
-# # Create a stacked bar chart for sessions held and not held
-# sessions_held = intervention_stats['Intervention Sessions Held'].values[0]
-# sessions_not_held = intervention_stats['Intervention Sessions Not Held'].values[0]
-
-# fig, ax = plt.subplots()
-# ax.bar(['Intervention Sessions'], [sessions_not_held], label='Not Held', color='#358E66')
-# ax.bar(['Intervention Sessions'], [sessions_held], bottom=[sessions_not_held], label='Held', color='#91D6B8')
-
-# # Display the values on the bars
-# ax.text(0, sessions_not_held / 2, str(sessions_not_held), ha='center', va='center', color='white')
-# ax.text(0, sessions_not_held + sessions_held / 2, str(sessions_held), ha='center', va='center', color='black')
-
-# ax.set_ylabel('Number of Sessions')
-# ax.set_title('Intervention Sessions Held vs Not Held')
-# ax.legend()
-
-# st.pyplot(fig)
-
-# def compute_student_metrics(df):
-# # Filter DataFrame for sessions where intervention happened
-# intervention_df = df[df[INTERVENTION_COLUMN].str.strip().str.lower() == 'yes']
-# intervention_sessions_held = len(intervention_df)
-
-# # Get list of student columns
-# student_columns = [col for col in df.columns if col.startswith('Student Attendance')]
-
-# student_metrics = {}
-
-# for col in student_columns:
-# student_name = col.replace('Student Attendance [', '').replace(']', '').strip()
-# # Get the attendance data for the student
-# student_data = intervention_df[[col]].copy()
-
-# # Treat blank entries as 'Absent'
-# student_data[col] = student_data[col].fillna('Absent')
-
-# # Assign attendance values
-# attendance_values = student_data[col].apply(lambda x: 1 if x in [
-# ENGAGED_STR,
-# PARTIALLY_ENGAGED_STR,
-# NOT_ENGAGED_STR
-# ] else 0)
-
-# # Number of Sessions Attended
-# sessions_attended = attendance_values.sum()
-
-# # Attendance (%)
-# attendance_pct = (sessions_attended / intervention_sessions_held) * 100 if intervention_sessions_held > 0 else 0
-# attendance_pct = round(attendance_pct, 2)
-
-# # For engagement calculation, include only sessions where attendance is not 'Absent'
-# valid_engagement_indices = attendance_values[attendance_values == 1].index
-# engagement_data = student_data.loc[valid_engagement_indices, col]
-
-# # Assign engagement values
-# engagement_values = engagement_data.apply(lambda x: 1 if x == ENGAGED_STR
-# else 0.5 if x == PARTIALLY_ENGAGED_STR else 0)
-
-# # Sum of Engagement Values
-# sum_engagement_values = engagement_values.sum()
-
-# # Number of Sessions Attended for engagement (should be same as sessions_attended)
-# number_sessions_attended = len(valid_engagement_indices)
-
-# # Engagement (%)
-# engagement_pct = (sum_engagement_values / number_sessions_attended) * 100 if number_sessions_attended > 0 else 0
-# engagement_pct = round(engagement_pct, 2)
-
-# # Store metrics
-# student_metrics[student_name] = {
-# 'Attendance (%)': attendance_pct,
-# 'Engagement (%)': engagement_pct
-# }
-
-# # Create a DataFrame from student_metrics
-# student_metrics_df = pd.DataFrame.from_dict(student_metrics, orient='index').reset_index()
-# student_metrics_df.rename(columns={'index': 'Student'}, inplace=True)
-# return student_metrics_df
-
-# def plot_student_metrics(student_metrics_df):
-# # Create a line graph for attendance and engagement
-# fig, ax = plt.subplots()
-
-# # Plotting Attendance and Engagement with specific colors
-# ax.plot(student_metrics_df['Student'], student_metrics_df['Attendance (%)'], marker='o', color='#005288', label='Attendance (%)')
-# ax.plot(student_metrics_df['Student'], student_metrics_df['Engagement (%)'], marker='o', color='#3AB0FF', label='Engagement (%)')
-
-# ax.set_xlabel('Student')
-# ax.set_ylabel('Percentage (%)')
-# ax.set_title('Student Attendance and Engagement Metrics')
-# ax.legend()
-# plt.xticks(rotation=45)
-
-# st.pyplot(fig)
-
-# def prepare_llm_input(student_metrics_df):
-# # Convert the student metrics DataFrame to a string
-# metrics_str = student_metrics_df.to_string(index=False)
-# llm_input = f"""
-# Based on the following student metrics:
-
-# {metrics_str}
-
-# Provide:
-
-# 1. Notes and Key Takeaways: Summarize the data, highlight students with the lowest and highest attendance and engagement percentages, identify students who may need adjustments to their intervention due to low attendance or engagement, and highlight students who are showing strong performance.
-
-# 2. Recommendations and Next Steps: Provide interpretations based on the analysis and suggest possible next steps or strategies to improve student outcomes.
-# """
-# return llm_input
-
-# def prompt_response_from_hf_llm(llm_input):
-# # Generate the refined prompt using Hugging Face API
-# response = client.chat.completions.create(
-# # model="mistralai/Mistral-7B-Instruct-v0.3",
-# model="meta-llama/Llama-3.1-70B-Instruct",
-# messages=[
-# {"role": "user", "content": llm_input}
-# ],
-# stream=True,
-# temperature=0.5,
-# max_tokens=1024,
-# top_p=0.7
-# )
-
-# # Combine messages if response is streamed
-# response_content = ""
-# for message in response:
-# response_content += message.choices[0].delta.content
-
-# return response_content.strip()
-
-# if __name__ == '__main__':
-# main()
-
-
-
-
-
-# CHARTS + DOWNLOAD
-# # intervention_analysis_app.py
-
-# import streamlit as st
-# import pandas as pd
-# import matplotlib.pyplot as plt
-# import io
-# # from transformers import pipeline
-# from huggingface_hub import InferenceClient
-# import os
-# from pathlib import Path
-# from dotenv import load_dotenv
-
-# load_dotenv()
-
-# # Set the Hugging Face API key
-# # Retrieve Hugging Face API key from environment variables
-# hf_api_key = os.getenv('HF_API_KEY')
-# if not hf_api_key:
-# raise ValueError("HF_API_KEY not set in environment variables")
-
-# # Create the Hugging Face inference client
-# client = InferenceClient(api_key=hf_api_key)
-
-# # Constants
-# INTERVENTION_COLUMN = 'Did the intervention happen today?'
-# ENGAGED_STR = 'Engaged (Respect, Responsibility, Effort)'
-# PARTIALLY_ENGAGED_STR = 'Partially Engaged (about 50%)'
-# NOT_ENGAGED_STR = 'Not Engaged (less than 50%)'
-
-# def main():
-# st.title("Intervention Program Analysis")
-
-# # File uploader
-# uploaded_file = st.file_uploader("Upload your Excel file", type=["xlsx"])
-
-# if uploaded_file is not None:
-# try:
-# # Read the Excel file into a DataFrame
-# df = pd.read_excel(uploaded_file)
-# st.subheader("Uploaded Data")
-# st.write(df.head(4)) # Display only the first four rows
-
-# # Ensure expected column is available
-# if INTERVENTION_COLUMN not in df.columns:
-# st.error(f"Expected column '{INTERVENTION_COLUMN}' not found.")
-# return
-
-# # Clean up column names
-# df.columns = df.columns.str.strip()
-
-# # Compute Intervention Session Statistics
-# intervention_stats = compute_intervention_statistics(df)
-# st.subheader("Intervention Session Statistics")
-# st.write(intervention_stats)
-
-# # Visualization for Intervention Session Statistics
-# intervention_fig = plot_intervention_statistics(intervention_stats)
-
-# # Add download button for Intervention Session Statistics chart
-# download_chart(intervention_fig, "intervention_statistics_chart.png")
-
-# # Compute Student Metrics
-# student_metrics_df = compute_student_metrics(df)
-# st.subheader("Student Metrics")
-# st.write(student_metrics_df)
-
-# # Visualization for Student Metrics
-# student_metrics_fig = plot_student_metrics(student_metrics_df)
-
-# # Add download button for Student Metrics chart
-# download_chart(student_metrics_fig, "student_metrics_chart.png")
-
-# # Prepare input for the language model
-# llm_input = prepare_llm_input(student_metrics_df)
-
-# # Generate Notes and Recommendations using Hugging Face LLM
-# with st.spinner("Generating AI analysis..."):
-# recommendations = prompt_response_from_hf_llm(llm_input)
-
-# st.subheader("AI Analysis")
-# st.markdown(recommendations)
-
-# # Add download button for LLM output
-# download_llm_output(recommendations, "llm_output.txt")
-
-# except Exception as e:
-# st.error(f"Error reading the file: {str(e)}")
-
-# def compute_intervention_statistics(df):
-# # Total Number of Days Available
-# total_days = len(df)
-
-# # Intervention Sessions Held
-# sessions_held = df[INTERVENTION_COLUMN].str.strip().str.lower().eq('yes').sum()
-
-# # Intervention Sessions Not Held
-# sessions_not_held = df[INTERVENTION_COLUMN].str.strip().str.lower().eq('no').sum()
-
-# # Intervention Frequency (%)
-# intervention_frequency = (sessions_held / total_days) * 100 if total_days > 0 else 0
-# intervention_frequency = round(intervention_frequency, 2)
-
-# # Create a DataFrame to display the statistics
-# stats = {
-# 'Total Number of Days Available': [total_days],
-# 'Intervention Sessions Held': [sessions_held],
-# 'Intervention Sessions Not Held': [sessions_not_held],
-# 'Intervention Frequency (%)': [intervention_frequency]
-# }
-# stats_df = pd.DataFrame(stats)
-# return stats_df
-
-# def plot_intervention_statistics(intervention_stats):
-# # Create a stacked bar chart for sessions held and not held
-# sessions_held = intervention_stats['Intervention Sessions Held'].values[0]
-# sessions_not_held = intervention_stats['Intervention Sessions Not Held'].values[0]
-
-# fig, ax = plt.subplots()
-# ax.bar(['Intervention Sessions'], [sessions_not_held], label='Not Held', color='#358E66')
-# ax.bar(['Intervention Sessions'], [sessions_held], bottom=[sessions_not_held], label='Held', color='#91D6B8')
-
-# # Display the values on the bars
-# ax.text(0, sessions_not_held / 2, str(sessions_not_held), ha='center', va='center', color='white')
-# ax.text(0, sessions_not_held + sessions_held / 2, str(sessions_held), ha='center', va='center', color='black')
-
-# ax.set_ylabel('Number of Sessions')
-# ax.set_title('Intervention Sessions Held vs Not Held')
-# ax.legend()
-
-# st.pyplot(fig)
-
-# return fig
-
-# def compute_student_metrics(df):
-# # Filter DataFrame for sessions where intervention happened
-# intervention_df = df[df[INTERVENTION_COLUMN].str.strip().str.lower() == 'yes']
-# intervention_sessions_held = len(intervention_df)
-
-# # Get list of student columns
-# student_columns = [col for col in df.columns if col.startswith('Student Attendance')]
-
-# student_metrics = {}
-
-# for col in student_columns:
-# student_name = col.replace('Student Attendance [', '').replace(']', '').strip()
-# # Get the attendance data for the student
-# student_data = intervention_df[[col]].copy()
-
-# # Treat blank entries as 'Absent'
-# student_data[col] = student_data[col].fillna('Absent')
-
-# # Assign attendance values
-# attendance_values = student_data[col].apply(lambda x: 1 if x in [
-# ENGAGED_STR,
-# PARTIALLY_ENGAGED_STR,
-# NOT_ENGAGED_STR
-# ] else 0)
-
-# # Number of Sessions Attended
-# sessions_attended = attendance_values.sum()
-
-# # Attendance (%)
-# attendance_pct = (sessions_attended / intervention_sessions_held) * 100 if intervention_sessions_held > 0 else 0
-# attendance_pct = round(attendance_pct, 2)
-
-# # For engagement calculation, include only sessions where attendance is not 'Absent'
-# valid_engagement_indices = attendance_values[attendance_values == 1].index
-# engagement_data = student_data.loc[valid_engagement_indices, col]
-
-# # Assign engagement values
-# engagement_values = engagement_data.apply(lambda x: 1 if x == ENGAGED_STR
-# else 0.5 if x == PARTIALLY_ENGAGED_STR else 0)
-
-# # Sum of Engagement Values
-# sum_engagement_values = engagement_values.sum()
-
-# # Number of Sessions Attended for engagement (should be same as sessions_attended)
-# number_sessions_attended = len(valid_engagement_indices)
-
-# # Engagement (%)
-# engagement_pct = (sum_engagement_values / number_sessions_attended) * 100 if number_sessions_attended > 0 else 0
-# engagement_pct = round(engagement_pct, 2)
-
-# # Store metrics
-# student_metrics[student_name] = {
-# 'Attendance (%)': attendance_pct,
-# 'Engagement (%)': engagement_pct
-# }
-
-# # Create a DataFrame from student_metrics
-# student_metrics_df = pd.DataFrame.from_dict(student_metrics, orient='index').reset_index()
-# student_metrics_df.rename(columns={'index': 'Student'}, inplace=True)
-# return student_metrics_df
-
-# def plot_student_metrics(student_metrics_df):
-# # Create a line graph for attendance and engagement
-# fig, ax = plt.subplots()
-
-# # Plotting Attendance and Engagement with specific colors
-# ax.plot(student_metrics_df['Student'], student_metrics_df['Attendance (%)'], marker='o', color='#005288', label='Attendance (%)')
-# ax.plot(student_metrics_df['Student'], student_metrics_df['Engagement (%)'], marker='o', color='#3AB0FF', label='Engagement (%)')
-
-# ax.set_xlabel('Student')
-# ax.set_ylabel('Percentage (%)')
-# ax.set_title('Student Attendance and Engagement Metrics')
-# ax.legend()
-# plt.xticks(rotation=45)
-
-# st.pyplot(fig)
-
-# return fig
-
-# def download_chart(fig, filename):
-# # Create a buffer to hold the image data
-# buffer = io.BytesIO()
-# # Save the figure to the buffer
-# fig.savefig(buffer, format='png')
-# # Set the file pointer to the beginning
-# buffer.seek(0)
-# # Add a download button to Streamlit
-# st.download_button(label="Download Chart", data=buffer, file_name=filename, mime='image/png')
-
-# def download_llm_output(content, filename):
-# # Create a buffer to hold the text data
-# buffer = io.BytesIO()
-# buffer.write(content.encode('utf-8'))
-# buffer.seek(0)
-# # Add a download button to Streamlit
-# st.download_button(label="Download LLM Output", data=buffer, file_name=filename, mime='text/plain')
-
-# def prepare_llm_input(student_metrics_df):
-# # Convert the student metrics DataFrame to a string
-# metrics_str = student_metrics_df.to_string(index=False)
-# llm_input = f"""
-# Based on the following student metrics:
-
-# {metrics_str}
-
-# Provide:
-
-# 1. Notes and Key Takeaways: Summarize the data, highlight students with the lowest and highest attendance and engagement percentages, identify students who may need adjustments to their intervention due to low attendance or engagement, and highlight students who are showing strong performance.
-
-# 2. Recommendations and Next Steps: Provide interpretations based on the analysis and suggest possible next steps or strategies to improve student outcomes.
-# """
-# return llm_input
-
-# def prompt_response_from_hf_llm(llm_input):
-# # Generate the refined prompt using Hugging Face API
-# response = client.chat.completions.create(
-# model="meta-llama/Llama-3.1-70B-Instruct",
-# messages=[
-# {"role": "user", "content": llm_input}
-# ],
-# stream=True,
-# temperature=0.5,
-# max_tokens=1024,
-# top_p=0.7
-# )
-
-# # Combine messages if response is streamed
-# response_content = ""
-# for message in response:
-# response_content += message.choices[0].delta.content
-
-# return response_content.strip()
-
-# if __name__ == '__main__':
-# main()
-
-
# CHARTS + DOWNLOAD + NO NAMES
# intervention_analysis_app.py