gyanbardhan123 committed on
Commit
45e98b3
·
verified ·
1 Parent(s): 3e9122d
Files changed (7) hide show
  1. Chatbot.py +33 -0
  2. Image_QA_Gemini.py +30 -0
  3. MCQ_Gen.py +156 -0
  4. QA_Gemini.py +23 -0
  5. chat_with_pdf.py +93 -0
  6. requirements.txt +7 -0
  7. streamlit.py +23 -0
Chatbot.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Chatbot page: module-level setup shared by show().
import streamlit as st
from dotenv import load_dotenv
# Load a local .env file so GOOGLE_API_KEY is available to configure() below.
load_dotenv()
import os
import google.generativeai as genai

# NOTE(review): this bare os.getenv() call discards its result and has no
# effect; the configure() call below is what actually reads the key.
os.getenv("GOOGLE_API_KEY")
# Authenticate the Gemini SDK with the key from the environment.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
9
def show():
    """Render the Chatbot page: a prompt box, a streamed Gemini reply, and
    the accumulated chat history.

    Side effects: reads/writes st.session_state['chat'] and
    st.session_state['chat_history'].
    """
    st.header("Chatbot")

    # Bug fix: the chat session used to be recreated with an empty history on
    # every Streamlit rerun, so the model never saw earlier turns. Persist the
    # session object in session_state so the conversation has memory.
    if 'chat' not in st.session_state:
        model = genai.GenerativeModel('gemini-1.0-pro-latest')
        st.session_state['chat'] = model.start_chat(history=[])
    chat = st.session_state['chat']

    if 'chat_history' not in st.session_state:
        st.session_state['chat_history'] = []

    # Renamed from `input` to avoid shadowing the builtin.
    user_input = st.text_input("Input Prompt: ", key="input")

    submit = st.button("Submit")

    if submit and user_input:
        # Stream the reply chunk by chunk; each chunk is shown and appended
        # to the display history.
        response = chat.send_message(user_input, stream=True)
        st.session_state['chat_history'].append(("You", user_input))
        st.subheader("The Response is")
        for chunk in response:
            st.write(chunk.text)
            st.session_state['chat_history'].append(("Bot", chunk.text))

    st.subheader("The Chat History is")
    for role, text in st.session_state['chat_history']:
        st.write(f"{role}: {text}")
Image_QA_Gemini.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Image QA page: module-level setup shared by show().
import streamlit as st
from dotenv import load_dotenv
# Load a local .env file so GOOGLE_API_KEY is available to configure() below.
load_dotenv()
import os
from PIL import Image
import google.generativeai as genai

# NOTE(review): this bare os.getenv() call discards its result and has no
# effect; the configure() call below is what actually reads the key.
os.getenv("GOOGLE_API_KEY")
# Authenticate the Gemini SDK with the key from the environment.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
10
def show():
    """Render the Image QA page: upload an image, optionally type a prompt,
    and ask the Gemini vision model about it.

    Side effects: Streamlit UI only; one generate_content API call on submit.
    """
    st.header("Image QA")
    # Renamed from `input` to avoid shadowing the builtin.
    prompt = st.text_input("Input Prompt: ", key="input")
    # Typo fix: label previously read "Chose an image...".
    file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

    image = None
    if file is not None:
        image = Image.open(file)
        st.image(image)

    submit = st.button("Submit")

    if submit:
        # Robustness fix: previously an empty string stood in for a missing
        # image and was passed to the vision model, which fails. Require an
        # uploaded image before calling the API.
        if image is None:
            st.warning("Please upload an image first.")
            return
        model = genai.GenerativeModel('gemini-pro-vision')
        st.subheader("The Response is")
        if prompt != "":
            # Text + image multimodal query.
            response = model.generate_content([prompt, image])
        else:
            # Image-only query (model describes the image).
            response = model.generate_content(image)
        st.write(response.text)
MCQ_Gen.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fpdf import FPDF
2
class PDF(FPDF):
    """PDF layout for an MCQ quiz: question pages with lettered options,
    followed by a final answer-key page."""

    def header(self):
        # Rendered automatically by fpdf at the top of every page.
        self.set_font('Arial', 'B', 12)
        self.cell(0, 10, 'MCQ Quiz', 0, 1, 'C')

    def chapter_title(self, num, label):
        # One-line question heading, e.g. "Question 1: ...".
        self.set_font('Arial', '', 12)
        self.cell(0, 10, f'Question {num}: {label}', 0, 1, 'L')
        self.ln(5)

    def chapter_body(self, body):
        # Wrapped body text; used here for each answer option.
        self.set_font('Arial', '', 12)
        self.multi_cell(0, 10, body)
        self.ln()

    def add_question(self, num, question, options):
        # Question heading followed by one line per option ("a. ...").
        self.chapter_title(num, question)
        for letter, choice in options.items():
            self.chapter_body(f"{letter}. {choice}")
        self.ln()

    def add_answers_section(self, answers):
        # Fresh page listing the correct answer for every question number.
        self.add_page()
        self.set_font('Arial', 'B', 12)
        self.cell(0, 10, 'Answers', 0, 1, 'C')
        self.ln(10)
        self.set_font('Arial', '', 12)
        for q_num, correct in answers.items():
            self.cell(0, 10, f"Question {q_num}: {correct}", 0, 1, 'L')
31
+
32
+
33
+
34
# MCQ generator page: module-level setup, the JSON response schema the model
# must imitate, and the two prompt templates.
import streamlit as st
from dotenv import load_dotenv
# Load a local .env file so GOOGLE_API_KEY is available in the environment.
load_dotenv()
import os
import json
import base64
from langchain_google_genai import ChatGoogleGenerativeAI
# NOTE(review): this bare os.getenv() call discards its result — presumably
# ChatGoogleGenerativeAI reads GOOGLE_API_KEY from the environment itself.
os.getenv("GOOGLE_API_KEY")

# Skeleton the LLM is told to fill in: three questions, four options each,
# plus the correct answer. Serialized with json.dumps() into the prompt.
RESPONSE_JSON = {
    "1": {
        "mcq": "multiple choice question",
        "options": {
            "a": "choice here",
            "b": "choice here",
            "c": "choice here",
            "d": "choice here",
        },
        "correct": "correct answer",
    },
    "2": {
        "mcq": "multiple choice question",
        "options": {
            "a": "choice here",
            "b": "choice here",
            "c": "choice here",
            "d": "choice here",
        },
        "correct": "correct answer",
    },
    "3": {
        "mcq": "multiple choice question",
        "options": {
            "a": "choice here",
            "b": "choice here",
            "c": "choice here",
            "d": "choice here",
        },
        "correct": "correct answer",
    },
}
# Quiz-generation prompt; variables: text, number, subject, tone, response_json.
TEMPLATE="""
Text:{text}
You are an expert MCQ maker. Given the above text, it is your job to \
create a quiz of {number} multiple choice questions for {subject} students in {tone} tone.
Make sure the questions are not repeated and check all the questions to be conforming the text as well.
Make sure to format your response like RESPONSE_JSON below and use it as a guide. \
Ensure to make {number} MCQs
### RESPONSE_JSON
{response_json}

"""

# Quiz-review prompt; variables: subject, quiz.
TEMPLATE2="""
You are an expert english grammarian and writer. Given a Multiple Choice Quiz for {subject} students.\
You need to evaluate the complexity of the question and give a complete analysis of the quiz. Only use at max 50 words for complexity analysis.
if the quiz is not at per with the cognitive and analytical abilities of the students,\
update the quiz questions which needs to be changed and change the tone such that it perfectly fits the student abilities
Quiz_MCQs:
{quiz}

Check from an expert English Writer of the above quiz:
"""
96
def show():
    """Render the MCQ generator page: collect source text and quiz settings,
    run the generate+review LangChain pipeline, and offer the resulting quiz
    as a downloadable, inline-previewed PDF.

    Side effects: writes "<SUBJECT>_mcq.pdf" to the working directory and
    makes two LLM calls on submit.
    """
    st.header("MCQ_Generator")
    TEXT = st.text_input("Input Prompt: ", key="input1")
    NUMBER = st.text_input("Number of MCQs ", key="input2")
    SUBJECT = st.text_input("Topic of MCQs ", key="input3")
    TONE = st.text_input("Difficulty Level ", key="input4")

    submit = st.button("Submit")

    if submit and TEXT:
        llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.9)
        # Imported lazily so the page renders without paying langchain's
        # import cost until a quiz is actually requested.
        from langchain.prompts import PromptTemplate
        from langchain.chains import LLMChain
        from langchain.chains import SequentialChain

        quiz_generation_prompt = PromptTemplate(
            input_variables=["text", "number", "subject", "tone", "response_json"],
            template=TEMPLATE,
        )
        quiz_chain = LLMChain(llm=llm, prompt=quiz_generation_prompt, output_key="quiz", verbose=True)
        # Bug fix: the review prompt previously reused TEMPLATE, whose
        # placeholders (text/number/tone/response_json) don't match the
        # declared input_variables ["subject", "quiz"]. TEMPLATE2 is the
        # evaluation prompt this chain is meant to use.
        quiz_evaluation_prompt = PromptTemplate(input_variables=["subject", "quiz"], template=TEMPLATE2)
        review_chain = LLMChain(llm=llm, prompt=quiz_evaluation_prompt, output_key="review", verbose=True)
        generate_evaluate_chain = SequentialChain(
            chains=[quiz_chain, review_chain],
            input_variables=["text", "number", "subject", "tone", "response_json"],
            output_variables=["quiz", "review"],
            verbose=True,
        )
        response = generate_evaluate_chain(
            {
                "text": TEXT,
                "number": NUMBER,
                "subject": SUBJECT,
                "tone": TONE,
                "response_json": json.dumps(RESPONSE_JSON),
            }
        )

        quiz = response.get("quiz")
        # The model sometimes echoes the "### RESPONSE_JSON" marker from the
        # prompt; keep only the JSON that follows it before parsing.
        if '### RESPONSE_JSON\n' in quiz:
            quiz = quiz.split('### RESPONSE_JSON\n')[1]
        quiz = json.loads(quiz)

        pdf = PDF()
        pdf.add_page()
        pdf.set_title(SUBJECT + " Quiz")
        answers = {}
        for key, value in quiz.items():
            question_num = int(key)
            pdf.add_question(question_num, value["mcq"], value["options"])
            answers[question_num] = value["correct"]
        pdf.add_answers_section(answers)

        pdf_file_path = SUBJECT + "_mcq.pdf"
        pdf.output(pdf_file_path)

        with open(pdf_file_path, "rb") as pdf_file:
            st.download_button(
                label="Download " + SUBJECT + " Quiz PDF",
                data=pdf_file,
                file_name=SUBJECT + "_quiz.pdf",
                mime="application/pdf",
            )

        # Inline preview of the generated PDF. Bug fix: the file handle used
        # for the base64 payload was previously never closed.
        with open(pdf_file_path, "rb") as f:
            encoded = base64.b64encode(f.read()).decode()
        pdf_display = f'<iframe src="data:application/pdf;base64,{encoded}" width="700" height="1000" type="application/pdf"></iframe>'
        st.markdown(pdf_display, unsafe_allow_html=True)
QA_Gemini.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# QA page: module-level setup shared by show().
import streamlit as st
from dotenv import load_dotenv
# Load a local .env file so GOOGLE_API_KEY is available to configure() below.
load_dotenv()
import os
import google.generativeai as genai

# NOTE(review): this bare os.getenv() call discards its result and has no
# effect; the configure() call below is what actually reads the key.
os.getenv("GOOGLE_API_KEY")
# Authenticate the Gemini SDK with the key from the environment.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
9
def show():
    """Render the single-shot QA page: a prompt box whose contents are sent
    to Gemini once on submit.

    Side effects: Streamlit UI only; one generate_content API call on submit.
    """
    # Typo fix: header previously read "QA Gemeni".
    st.header("QA Gemini")
    # Renamed from `input` to avoid shadowing the builtin.
    prompt = st.text_input("Input Prompt: ", key="input")

    submit = st.button("Submit")

    if submit:
        if prompt != "":
            # Construct the model only when there is actually a question —
            # previously it was built even for the empty-prompt branch.
            model = genai.GenerativeModel('gemini-1.0-pro-latest')
            st.subheader("The Response is")
            response = model.generate_content(prompt)
            st.write(response.text)
        else:
            st.write("Ask a Question First!!")
chat_with_pdf.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Chat-with-PDF page: module-level setup (Gemini + Pinecone credentials).
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from langchain_google_genai import GoogleGenerativeAIEmbeddings
import google.generativeai as genai
from langchain.vectorstores import Pinecone as PC
from langchain_google_genai import ChatGoogleGenerativeAI

from dotenv import load_dotenv

# Load a local .env file so both API keys are available below.
load_dotenv()
# NOTE(review): these two bare os.getenv() calls discard their results and
# have no effect; the configure()/os.environ lines below do the real work.
os.getenv("GOOGLE_API_KEY")
os.getenv("PINECONE_API_KEY")
# Authenticate the Gemini SDK with the key from the environment.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Re-export the Pinecone key for the pinecone client / langchain wrapper.
# NOTE(review): if PINECONE_API_KEY is unset, os.getenv returns None and this
# assignment raises TypeError at import time — verify the key is always set.
os.environ['PINECONE_API_KEY'] = os.getenv("PINECONE_API_KEY")
17
+
18
+
19
def Pine():
    """Ensure the Pinecone index used by this app exists and return its name.

    Creates the serverless index on first use; subsequent calls are no-ops.
    """
    from pinecone import Pinecone, ServerlessSpec

    index_name = "testing"
    client = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))

    # Create only if absent. dimension=768 matches the vectors produced by
    # Google's embedding-001 model used in get_vector_store().
    existing = client.list_indexes().names()
    if index_name not in existing:
        client.create_index(
            name=index_name,
            dimension=768,
            metric="cosine",
            spec=ServerlessSpec(cloud='aws', region='us-east-1'),
        )
    return index_name
33
+
34
+
35
def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF.

    Parameters:
        pdf_docs: iterable of file-like objects accepted by PyPDF2.PdfReader.

    Returns:
        A single string with all pages' text joined back to back.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # Bug fix: extract_text() returns None for pages with no
            # extractable text (e.g. scanned images); `or ""` avoids a
            # TypeError on concatenation.
            text += page.extract_text() or ""
    return text
42
+
43
+
44
def get_text_chunks(text):
    """Split *text* into ~10,000-character chunks with 1,000-character overlap.

    Returns the list of chunk strings produced by langchain's recursive splitter.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    return splitter.split_text(text)
48
+
49
+
50
def get_vector_store(text_chunks):
    """Embed text chunks with Google's embedding-001 model and index them in
    the app's Pinecone index.

    Parameters:
        text_chunks: iterable of strings (output of get_text_chunks).

    Returns:
        The langchain Pinecone vector store wrapping the populated index.
    """
    index_name = Pine()
    embedding = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    # Idiom fix: list(...) replaces the redundant [t for t in text_chunks].
    docsearch = PC.from_texts(list(text_chunks), embedding, index_name=index_name)
    return docsearch
55
+
56
+
57
def showman(pdf_docs):
    """Render the question/answer UI over PDFs that have already been
    processed into st.session_state["docsearch"].

    Parameters:
        pdf_docs: the uploaded PDF list. Currently unused here (processing
            happens in show()); kept for interface compatibility.
    """
    st.header("Chat with PDF")

    user_question = st.text_input("Ask a Question from the PDF Files", key="user_question")
    ask_another_question = st.button("Ask Another Question", on_click=clear_text)

    if user_question and not ask_another_question:
        # Robustness fix: a question typed before "Submit & Process" used to
        # raise KeyError("docsearch"); guard and prompt the user instead.
        if "docsearch" not in st.session_state:
            st.warning("Please upload and process your PDF files first.")
            return
        llm = ChatGoogleGenerativeAI(model="models/gemini-1.5-pro-latest", temperature=0.9)
        from langchain.chains import RetrievalQA
        # "stuff" chain: retrieved chunks are concatenated into one prompt.
        qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=st.session_state["docsearch"].as_retriever())
        response = qa(user_question)
        st.session_state["response"] = response["result"]
        st.write("Answer:", st.session_state["response"])
70
+
71
+
72
def clear_text():
    """Button callback: blank out the question box and the stored answer."""
    for state_key in ("user_question", "response"):
        st.session_state[state_key] = ""
75
+
76
def show():
    """Page entry point: sidebar upload/process workflow, then the Q&A view.

    Side effects: stores "pdf_docs", "processed" and "docsearch" in
    st.session_state.
    """
    with st.sidebar:
        st.title("Menu:")
        pdf_docs = st.file_uploader("Upload your PDF Files", accept_multiple_files=True)
        # Remember the latest upload; fall back to the previous one on rerun.
        if pdf_docs is not None:
            st.session_state["pdf_docs"] = pdf_docs
        else:
            st.session_state["pdf_docs"] = st.session_state.get("pdf_docs", [])
        already_processed = st.session_state.get("processed", False)

        # Offer processing only once per session, and only with files present.
        if pdf_docs and not already_processed:
            if st.button("Submit & Process"):
                with st.spinner("Processing..."):
                    raw_text = get_pdf_text(pdf_docs)
                    chunks = get_text_chunks(raw_text)
                    st.session_state["docsearch"] = get_vector_store(chunks)
                    st.session_state["processed"] = True
                    st.success("Done!")

    showman(st.session_state["pdf_docs"])
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ PyPDF2
2
+ langchain
3
+ langchain_google_genai
4
+ python-dotenv
5
+ pinecone
6
+ google-generativeai
7
+ fpdf
+ streamlit
streamlit.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# NOTE(review): this file is named streamlit.py, which shadows the installed
# `streamlit` package for any import resolved from this directory — renaming
# it (e.g. to app.py) is strongly recommended.
import streamlit as st

st.set_page_config(page_title="Gemini_Student", page_icon=":material/edit:")

# Consistency fix: the sidebar title previously misspelled the app name as
# "Gemeni_Student" while page_title uses "Gemini_Student".
st.sidebar.title("Welcome to Gemini_Student")
selection = st.sidebar.radio("", ["Chatbot", "Image_QA_Gemini", "QA_Gemini", "MCQ_Gen", "chat_with_pdf"])

# Import the selected page lazily so only that page's dependencies load.
if selection == "Chatbot":
    import Chatbot
    Chatbot.show()
elif selection == "Image_QA_Gemini":
    import Image_QA_Gemini
    Image_QA_Gemini.show()
elif selection == "QA_Gemini":
    import QA_Gemini
    QA_Gemini.show()
elif selection == "MCQ_Gen":
    import MCQ_Gen
    MCQ_Gen.show()
elif selection == "chat_with_pdf":
    import chat_with_pdf
    chat_with_pdf.show()
+