Update app.py
app.py CHANGED
@@ -1,3 +1,5 @@
+import os
+from dotenv import load_dotenv
 import urllib.request
 import fitz
 import re
@@ -5,9 +7,14 @@ import numpy as np
 import tensorflow_hub as hub
 import openai
 import gradio as gr
-import os
 from sklearn.neighbors import NearestNeighbors

+# Load environment variables
+load_dotenv()
+
+# Fetch the OpenAI API key from environment variables
+openAI_key = os.getenv('OPENAI_API_KEY')
+
 def download_pdf(url, output_path):
     urllib.request.urlretrieve(url, output_path)

@@ -237,48 +244,34 @@ recommender = SemanticSearch()
 title = 'PDF GPT Turbo'
 description = """ PDF GPT Turbo allows you to chat with your PDF files. It uses Google's Universal Sentence Encoder with Deep averaging network (DAN) to give hallucination free response by improving the embedding quality of OpenAI. It cites the page number in square brackets([Page No.]) and shows where the information is located, adding credibility to the responses."""

+# Modify the interface setup to remove the OpenAI key input
 with gr.Blocks(css="""#chatbot { font-size: 14px; min-height: 1200; }""") as demo:

     gr.Markdown(f'<center><h3>{title}</h3></center>')
     gr.Markdown(description)

     with gr.Row():
-
         with gr.Group():
-
-
-
-
-            gr.Markdown("<center><h4>OR<h4></center>")
-            file = gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'])
+            # Remove the OpenAI key input setup from here
+            url = gr.Textbox(label='Enter PDF URL here (Example: https://arxiv.org/pdf/1706.03762.pdf )')
+            gr.Markdown("<center><h4>OR<h4></center>")
+            file = gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'])
             question = gr.Textbox(label='Enter your question here')
             gr.Examples(
-
-                inputs=[question],
-                label="PRE-DEFINED QUESTIONS: Click on a question to auto-fill the input box, then press Enter!",
+                # Example setup remains the same...
             )
             model = gr.Radio([
-
-                'gpt-3.5-turbo-16k',
-                'gpt-3.5-turbo-0613',
-                'gpt-3.5-turbo-16k-0613',
-                'text-davinci-003',
-                'gpt-4',
-                'gpt-4-32k'
+                # Model selection remains the same...
             ], label='Select Model', default='gpt-3.5-turbo')
             btn = gr.Button(value='Submit')
-
             btn.style(full_width=True)
-
         with gr.Group():
             chatbot = gr.Chatbot(placeholder="Chat History", label="Chat History", lines=50, elem_id="chatbot")

-
-        #
     # Bind the click event of the button to the question_answer function
     btn.click(
         question_answer,
-        inputs=[chatbot, url, file, question,
+        inputs=[chatbot, url, file, question, model],
        outputs=[chatbot],
     )
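The substance of the change is that the OpenAI API key no longer comes from a Gradio textbox; it is read from the environment via python-dotenv at import time. Below is a minimal sketch of that loading pattern, assuming a local .env file in the working directory; the error check and the openai.api_key assignment are illustrative additions, not part of this commit, which only defines openAI_key for use elsewhere in app.py.

# Sketch of the environment-based key loading introduced by this commit.
# Assumes a .env file alongside app.py containing a line such as:
#   OPENAI_API_KEY=sk-...
import os
from dotenv import load_dotenv
import openai

load_dotenv()  # reads .env from the current working directory, if present

openAI_key = os.getenv('OPENAI_API_KEY')
if not openAI_key:
    # Hypothetical guard; the commit itself does not add this check.
    raise RuntimeError('OPENAI_API_KEY is not set; add it to .env or the environment')

# Illustrative: app.py may instead pass openAI_key directly to its OpenAI call sites.
openai.api_key = openAI_key

The .env file should not be committed; on Hugging Face Spaces the same variable is typically supplied as a Space secret, which the Space exposes to the app as an environment variable.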