Farid Karimli committed on
Commit
0958f93
·
1 Parent(s): 638bffe

PDF Reader Re-structure

Browse files
code/modules/dataloader/data_loader.py CHANGED
@@ -25,6 +25,8 @@ import html2text
25
  import bs4
26
  import tempfile
27
  import PyPDF2
 
 
28
 
29
  try:
30
  from modules.dataloader.helpers import get_metadata, download_pdf_from_url
@@ -36,103 +38,6 @@ except:
36
  logger = logging.getLogger(__name__)
37
  BASE_DIR = os.getcwd()
38
 
39
- class PDFReader:
40
- def __init__(self):
41
- pass
42
-
43
- def get_loader(self, pdf_path):
44
- loader = PyMuPDFLoader(pdf_path)
45
- return loader
46
-
47
- def get_documents(self, loader):
48
- return loader.load()
49
-
50
- class LlamaParser:
51
- def __init__(self):
52
- print("Initializing LlamaParser")
53
- self.GPT_API_KEY = OPENAI_API_KEY
54
- self.LLAMA_CLOUD_API_KEY = LLAMA_CLOUD_API_KEY
55
- self.parse_url = "https://api.cloud.llamaindex.ai/api/parsing/upload"
56
- self.headers = {
57
- 'Accept': 'application/json',
58
- 'Authorization': f'Bearer {LLAMA_CLOUD_API_KEY}'
59
- }
60
- self.parser = LlamaParse(
61
- api_key=LLAMA_CLOUD_API_KEY,
62
- result_type="markdown",
63
- verbose=True,
64
- language="en",
65
- gpt4o_mode=False,
66
- # gpt4o_api_key=OPENAI_API_KEY,
67
- parsing_instruction="The provided documents are PDFs of lecture slides of deep learning material. They contain LaTeX equations, images, and text. The goal is to extract the text, images and equations from the slides. The markdown should be clean and easy to read, and any math equation should be converted to LaTeX format, between $ signs. For images, if you can, give a description and a source."
68
- )
69
-
70
- def parse(self, pdf_path):
71
- pdf_name = os.path.basename(pdf_path)
72
-
73
- if not os.path.exists(pdf_path):
74
- logger.warning(f"File {pdf_name} does not exist locally, installing temporarily...")
75
- pdf_path = download_pdf_from_url(pdf_path)
76
-
77
- documents = self.parser.load_data(pdf_path)
78
- document = [document.to_langchain_format() for document in documents][0]
79
-
80
- content = document.page_content
81
- pages = content.split("\n---\n")
82
- pages = [page.strip() for page in pages]
83
-
84
- documents = [
85
- Document(
86
- page_content=page,
87
- metadata={"source": pdf_path, "page": i}
88
- ) for i, page in enumerate(pages)
89
- ]
90
-
91
- return documents
92
-
93
- def make_request(self, pdf_url):
94
- payload = {
95
- "gpt4o_mode": "false",
96
- "parsing_instruction": "The provided document is a PDF of lecture slides of deep learning material. They contain LaTeX equations, images, and text. The goal is to extract the text, images and equations from the slides and convert them to markdown format. The markdown should be clean and easy to read, and any math equation should be converted to LaTeX, between $$. For images, give a description and if you can, a source.",
97
- }
98
-
99
- files = [
100
- ('file', ('file', requests.get(pdf_url).content, 'application/octet-stream'))
101
- ]
102
-
103
- response = requests.request(
104
- "POST", self.parse_url, headers=self.headers, data=payload, files=files)
105
-
106
- return response.json()['id'], response.json()['status']
107
-
108
- async def get_result(self, job_id):
109
- url = f"https://api.cloud.llamaindex.ai/api/parsing/job/{job_id}/result/markdown"
110
-
111
- response = requests.request("GET", url, headers=self.headers, data={})
112
-
113
- return response.json()['markdown']
114
-
115
- async def _parse(self, pdf_path):
116
- job_id, status = self.make_request(pdf_path)
117
-
118
- while status != "SUCCESS":
119
- url = f"https://api.cloud.llamaindex.ai/api/parsing/job/{job_id}"
120
- response = requests.request("GET", url, headers=self.headers, data={})
121
- status = response.json()["status"]
122
-
123
- result = await self.get_result(job_id)
124
-
125
- documents = [
126
- Document(
127
- page_content=result,
128
- metadata={"source": pdf_path}
129
- )
130
- ]
131
-
132
- return documents
133
-
134
- async def _parse(self, pdf_path):
135
- return await self._parse(pdf_path)
136
 
137
  class HTMLReader:
138
  def __init__(self):
 
25
  import bs4
26
  import tempfile
27
  import PyPDF2
28
+ from modules.dataloader.pdf_readers.base import PDFReader
29
+ from modules.dataloader.pdf_readers.llama import LlamaParser
30
 
31
  try:
32
  from modules.dataloader.helpers import get_metadata, download_pdf_from_url
 
38
  logger = logging.getLogger(__name__)
39
  BASE_DIR = os.getcwd()
40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  class HTMLReader:
43
  def __init__(self):
code/modules/dataloader/pdf_readers/base.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_community.document_loaders import PyMuPDFLoader


class PDFReader:
    """Thin PDF-reader adapter exposing the common ``parse`` interface.

    Delegates all actual extraction work to LangChain's ``PyMuPDFLoader``.
    """

    def __init__(self):
        pass

    def get_loader(self, pdf_path):
        """Return a ``PyMuPDFLoader`` bound to ``pdf_path``."""
        return PyMuPDFLoader(pdf_path)

    def parse(self, pdf_path):
        """Load ``pdf_path`` and return its pages as LangChain documents."""
        return self.get_loader(pdf_path).load()
code/modules/dataloader/pdf_readers/llama.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
import os

import requests
from llama_parse import LlamaParse
from langchain.schema import Document
from modules.config.constants import OPENAI_API_KEY, LLAMA_CLOUD_API_KEY
from modules.dataloader.helpers import download_pdf_from_url


class LlamaParser:
    """Parse lecture-slide PDFs into LangChain ``Document`` objects via LlamaParse.

    Two entry points:
      * ``parse``  -- synchronous, uses the ``llama_parse`` SDK client and
        returns one ``Document`` per slide.
      * ``_parse`` -- async, drives the LlamaIndex cloud REST API directly and
        returns a single ``Document`` for the whole PDF.
    """

    def __init__(self):
        self.GPT_API_KEY = OPENAI_API_KEY
        self.LLAMA_CLOUD_API_KEY = LLAMA_CLOUD_API_KEY
        self.parse_url = "https://api.cloud.llamaindex.ai/api/parsing/upload"
        self.headers = {
            'Accept': 'application/json',
            'Authorization': f'Bearer {LLAMA_CLOUD_API_KEY}'
        }
        self.parser = LlamaParse(
            api_key=LLAMA_CLOUD_API_KEY,
            result_type="markdown",
            verbose=True,
            language="en",
            gpt4o_mode=False,
            # gpt4o_api_key=OPENAI_API_KEY,
            parsing_instruction="The provided documents are PDFs of lecture slides of deep learning material. They contain LaTeX equations, images, and text. The goal is to extract the text, images and equations from the slides. The markdown should be clean and easy to read, and any math equation should be converted to LaTeX format, between $ signs. For images, if you can, give a description and a source."
        )

    def parse(self, pdf_path):
        """Parse ``pdf_path`` (local path or URL) and return one Document per slide.

        A path that does not exist locally is assumed to be a URL and is
        downloaded to a temporary file first.
        """
        if not os.path.exists(pdf_path):
            pdf_path = download_pdf_from_url(pdf_path)

        documents = self.parser.load_data(pdf_path)
        # load_data returns a single document for the whole PDF; LlamaParse
        # separates slides with "\n---\n" in the markdown, so split on that.
        document = documents[0].to_langchain_format()

        pages = [page.strip() for page in document.page_content.split("\n---\n")]

        return [
            Document(
                page_content=page,
                metadata={"source": pdf_path, "page": i}
            ) for i, page in enumerate(pages)
        ]

    def make_request(self, pdf_url):
        """Upload the PDF at ``pdf_url`` to the parsing API.

        Returns:
            tuple[str, str]: the job id and its initial status.
        """
        payload = {
            "gpt4o_mode": "false",
            "parsing_instruction": "The provided document is a PDF of lecture slides of deep learning material. They contain LaTeX equations, images, and text. The goal is to extract the text, images and equations from the slides and convert them to markdown format. The markdown should be clean and easy to read, and any math equation should be converted to LaTeX, between $$. For images, give a description and if you can, a source.",
        }

        files = [
            ('file', ('file', requests.get(pdf_url).content, 'application/octet-stream'))
        ]

        response = requests.request(
            "POST", self.parse_url, headers=self.headers, data=payload, files=files)
        # Parse the response body once instead of twice.
        body = response.json()

        return body['id'], body['status']

    async def get_result(self, job_id):
        """Fetch the markdown result of a finished parsing job."""
        url = f"https://api.cloud.llamaindex.ai/api/parsing/job/{job_id}/result/markdown"

        response = requests.request("GET", url, headers=self.headers, data={})

        return response.json()['markdown']

    async def _parse(self, pdf_path):
        """Async REST variant of ``parse``.

        Submits ``pdf_path`` (a URL) for parsing, polls until the job reaches a
        terminal state, and returns the whole result as a single ``Document``.

        Raises:
            RuntimeError: if the parsing job ends in a failed state.
        """
        job_id, status = self.make_request(pdf_path)

        # Poll until the job succeeds. The previous implementation busy-waited
        # with no delay and looped forever if the job failed; it also defined a
        # second ``_parse`` that shadowed this one and recursed infinitely —
        # that duplicate has been removed.
        while status != "SUCCESS":
            if status in ("ERROR", "CANCELED"):
                raise RuntimeError(
                    f"LlamaParse job {job_id} terminated with status {status}"
                )
            await asyncio.sleep(1)  # avoid hammering the API
            url = f"https://api.cloud.llamaindex.ai/api/parsing/job/{job_id}"
            response = requests.request("GET", url, headers=self.headers, data={})
            status = response.json()["status"]

        result = await self.get_result(job_id)

        return [
            Document(
                page_content=result,
                metadata={"source": pdf_path}
            )
        ]