Farid Karimli committed on
Commit 8d6adc4 · 1 Parent(s): a49e92a

Refactored LlamaParser to parse PDF text and structure only

Files changed (1): code/modules/data_loader.py (+74 −4)
code/modules/data_loader.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import bs4
 from urllib.parse import urljoin
+import asyncio
 import requests
 import pysrt
 from langchain_community.document_loaders import (
@@ -11,6 +12,7 @@ from langchain_community.document_loaders import (
     TextLoader,
 )
 import html2text
+import tempfile
 from langchain_community.document_loaders import UnstructuredMarkdownLoader
 from llama_parse import LlamaParse
 from langchain.schema import Document
@@ -31,7 +33,8 @@ except:
 from constants import OPENAI_API_KEY, LLAMA_CLOUD_API_KEY
 
 logger = logging.getLogger(__name__)
-
+BASE_DIR = os.getcwd()
+STORAGE_DIR = os.path.join(BASE_DIR, "storage", "data")
 
 class PDFReader:
     def __init__(self):
@@ -47,21 +50,86 @@ class PDFReader:
 
 class LlamaParser:
     def __init__(self):
+        self.GPT_API_KEY = OPENAI_API_KEY
+        self.LLAMA_CLOUD_API_KEY = LLAMA_CLOUD_API_KEY
+        self.parse_url = "https://api.cloud.llamaindex.ai/api/parsing/upload"
+        self.headers = {
+            'Accept': 'application/json',
+            'Authorization': f'Bearer {self.LLAMA_CLOUD_API_KEY}'
+        }
         self.parser = LlamaParse(
             api_key=LLAMA_CLOUD_API_KEY,
             result_type="markdown",
             verbose=True,
             language="en",
-            gpt4o_mode=True,
-            gpt4o_api_key=OPENAI_API_KEY,
+            gpt4o_mode=False,
+            # gpt4o_api_key=OPENAI_API_KEY,
             parsing_instruction="The provided documents are PDFs of lecture slides of deep learning material. They contain LaTeX equations, images, and text. The goal is to extract the text, images and equations from the slides and convert them to markdown format. The markdown should be clean and easy to read, and any math equation should be converted to LaTeX, between $$. For images, give a description and if you can, a source."
         )
 
     def parse(self, pdf_path):
+        pdf_name = os.path.basename(pdf_path)
+        logger.info(f"Processing PDF: {pdf_name}. Path: {pdf_path}")
+
+        path = os.path.join(STORAGE_DIR, pdf_name)
+        if os.path.exists(path):
+            pdf_path = path
+        else:
+            pdf_path = FileReader.download_pdf_from_url(pdf_url=pdf_path)
+
         documents = self.parser.load_data(pdf_path)
         documents = [document.to_langchain_format() for document in documents]
+        print(documents)
+
+        os.remove(pdf_path)
+        return documents
+
+    def make_request(self, pdf_url):
+        payload = {
+            "gpt4o_mode": "false",
+            "parsing_instruction": "The provided document is a PDF of lecture slides of deep learning material. They contain LaTeX equations, images, and text. The goal is to extract the text, images and equations from the slides and convert them to markdown format. The markdown should be clean and easy to read, and any math equation should be converted to LaTeX, between $$. For images, give a description and if you can, a source.",
+        }
+
+        files = [
+            ('file', ('file', requests.get(pdf_url).content, 'application/octet-stream'))
+        ]
+
+        response = requests.request(
+            "POST", self.parse_url, headers=self.headers, data=payload, files=files)
+
+        return response.json()['id'], response.json()['status']
+
+    async def get_result(self, job_id):
+        url = f"https://api.cloud.llamaindex.ai/api/parsing/job/{job_id}/result/markdown"
+
+        response = requests.request("GET", url, headers=self.headers, data={})
+
+        return response.json()['markdown']
+
+    async def _parse(self, pdf_path):
+        job_id, status = self.make_request(pdf_path)
+        print(f"Job ID: {job_id}", f"Status: {status}")
+
+        while status != "SUCCESS":
+            url = f"https://api.cloud.llamaindex.ai/api/parsing/job/{job_id}"
+            response = requests.request("GET", url, headers=self.headers, data={})
+            status = response.json()["status"]
+
+            print(status)
+
+        result = await self.get_result(job_id)
+
+        documents = [
+            Document(
+                page_content=result,
+                metadata={"source": pdf_path}
+            )
+        ]
+
         return documents
 
+    async def aparse(self, pdf_path):
+        return await self._parse(pdf_path)
 
 class HTMLReader:
     def __init__(self):
@@ -127,7 +195,8 @@ class FileReader:
             text += page.extract_text()
         return text
 
-    def download_pdf_from_url(self, pdf_url):
+    @staticmethod
+    def download_pdf_from_url(pdf_url):
        response = requests.get(pdf_url)
        if response.status_code == 200:
            with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
@@ -140,6 +209,7 @@ class FileReader:
 
    def read_pdf(self, temp_file_path: str):
        if self.kind == "llama":
+            # documents = asyncio.run(self.pdf_reader.parse(temp_file_path))
            documents = self.pdf_reader.parse(temp_file_path)
        else:
            loader = self.pdf_reader.get_loader(temp_file_path)
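
For reference, a minimal usage sketch of the refactored LlamaParser (not part of the commit). The import path modules.data_loader, the example URL, and a LLAMA_CLOUD_API_KEY configured in constants are assumptions; the methods themselves are the ones added in the diff above.

import asyncio
from modules.data_loader import LlamaParser  # assumed import path for code/modules/data_loader.py

parser = LlamaParser()
pdf_url = "https://example.com/lecture01.pdf"  # hypothetical lecture-slide PDF

# Synchronous path (the one FileReader.read_pdf uses when kind == "llama"):
# the LlamaParse SDK handles the upload and returns LangChain Documents.
documents = parser.parse(pdf_url)

# Asynchronous path: upload through the raw REST endpoint (make_request),
# poll the job until its status is SUCCESS, then fetch the markdown (get_result).
documents = asyncio.run(parser.aparse(pdf_url))

print(len(documents), documents[0].metadata)

Both paths return a list of langchain Document objects holding the slides as markdown, which is the shape read_pdf expects downstream.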