Imageye committed
Commit ebdfd20 · verified · Parent: d234b8a

Update app.py

Files changed (1): app.py (+9 -9)
app.py CHANGED
@@ -4,9 +4,9 @@ import re
 import tempfile
 import os
 import warnings
-from groq import Groq
 import torch
 from transformers import pipeline
+from groq.client import Client
 
 # Set up device for torch
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
@@ -23,7 +23,7 @@ asr_pipeline = pipeline(
 warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")
 
 # Set up Groq client
-client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+client = Client(api_key=os.environ.get("GROQ_API_KEY"))
 
 # Function to transcribe audio using ASR pipeline
 def transcribe_audio(file_path):
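Note on the swap above: from groq.client import Client only resolves if the installed groq package actually ships a groq.client module; the SDK's documented entry point is from groq import Groq. A defensive sketch (not part of the commit) that accepts either surface:

import os

try:
    from groq.client import Client as GroqClient  # interface this commit assumes
except ImportError:
    from groq import Groq as GroqClient  # documented groq SDK entry point

client = GroqClient(api_key=os.environ.get("GROQ_API_KEY"))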
@@ -48,7 +48,7 @@ def get_transcript(url):
 # Function to summarize text using Groq API
 def summarize_text(text):
     try:
-        response = client.chat.completions.create(
+        response = client.chat_completions.create(
             messages=[
                 {
                     "role": "user",
@@ -57,7 +57,7 @@ def summarize_text(text):
             ],
             model="llama3-8b-8192",
         )
-        summary = response.choices[0].message.content.strip()
+        summary = response['choices'][0]['message']['content'].strip()
         return summary
     except Exception as e:
         return f"Error summarizing text: {e}"
@@ -65,7 +65,7 @@ def summarize_text(text):
 # Function to generate quiz questions using Groq API
 def generate_quiz_questions(text):
     try:
-        response = client.chat.completions.create(
+        response = client.chat_completions.create(
             messages=[
                 {
                     "role": "user",
@@ -74,7 +74,7 @@ def generate_quiz_questions(text):
             ],
             model="llama3-8b-8192",
         )
-        quiz_questions = response.choices[0].message.content.strip()
+        quiz_questions = response['choices'][0]['message']['content'].strip()
         return quiz_questions
     except Exception as e:
         return f"Error generating quiz questions: {e}"
@@ -116,7 +116,7 @@ def parse_quiz_questions(quiz_text):
 # Function to generate explanation for quiz answers using Groq API
 def generate_explanation(question, correct_answer, user_answer):
     try:
-        response = client.chat.completions.create(
+        response = client.chat_completions.create(
             messages=[
                 {
                     "role": "user",
@@ -125,7 +125,7 @@ def generate_explanation(question, correct_answer, user_answer):
             ],
             model="llama3-8b-8192",
         )
-        explanation = response.choices[0].message.content.strip()
+        explanation = response['choices'][0]['message']['content'].strip()
         return explanation
     except Exception as e:
         return f"Error generating explanation: {e}"
@@ -241,4 +241,4 @@ if st.session_state.generated_quiz:
         if item['status'] == "Incorrect":
             st.write(f"**Explanation:** {item['explanation']}")
 else:
-    st.write("Please generate the quiz first.")
+    st.write("Please generate the quiz first.")