huseinzol05 committed
Commit f840d29
1 Parent(s): 62c35bb
Upload 2 files
- crawl-hansard.ipynb +188 -0
- hansard-2024-07-18.jsonl +3 -0
crawl-hansard.ipynb
ADDED
@@ -0,0 +1,188 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "22f19553",
   "metadata": {},
   "outputs": [],
   "source": [
    "from bs4 import BeautifulSoup\n",
    "import os\n",
    "import pandas\n",
    "from datetime import datetime, timedelta\n",
    "from elasticsearch import Elasticsearch\n",
    "from elasticsearch_dsl import Search\n",
    "from tqdm import tqdm\n",
    "import requests\n",
    "import json\n",
    "from tika import parser\n",
    "import re\n",
    "from unidecode import unidecode\n",
    "from glob import glob\n",
    "import openai\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "a223ea7f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['01012024', '02012024', '03012024', '04012024', '05012024', '06012024', '07012024', '08012024', '09012024', '10012024', '11012024', '12012024', '13012024', '14012024', '15012024', '16012024', '17012024', '18012024', '19012024', '20012024', '21012024', '22012024', '23012024', '24012024', '25012024', '26012024', '27012024', '28012024', '29012024', '30012024', '31012024', '01022024', '02022024', '03022024', '04022024', '05022024', '06022024', '07022024', '08022024', '09022024', '10022024', '11022024', '12022024', '13022024', '14022024', '15022024', '16022024', '17022024', '18022024', '19022024', '20022024', '21022024', '22022024', '23022024', '24022024', '25022024', '26022024', '27022024', '28022024', '29022024', '01032024', '02032024', '03032024', '04032024', '05032024', '06032024', '07032024', '08032024', '09032024', '10032024', '11032024', '12032024', '13032024', '14032024', '15032024', '16032024', '17032024', '18032024', '19032024', '20032024', '21032024', '22032024', '23032024', '24032024', '25032024', '26032024', '27032024', '28032024', '29032024', '30032024', '31032024', '01042024', '02042024', '03042024', '04042024', '05042024', '06042024', '07042024', '08042024', '09042024', '10042024', '11042024', '12042024', '13042024', '14042024', '15042024', '16042024', '17042024', '18042024', '19042024', '20042024', '21042024', '22042024', '23042024', '24042024', '25042024', '26042024', '27042024', '28042024', '29042024', '30042024', '01052024', '02052024', '03052024', '04052024', '05052024', '06052024', '07052024', '08052024', '09052024', '10052024', '11052024', '12052024', '13052024', '14052024', '15052024', '16052024', '17052024', '18052024', '19052024', '20052024', '21052024', '22052024', '23052024', '24052024', '25052024', '26052024', '27052024', '28052024', '29052024', '30052024', '31052024', '01062024', '02062024', '03062024', '04062024', '05062024', '06062024', '07062024', '08062024', '09062024', '10062024', '11062024', '12062024', '13062024', '14062024', '15062024', '16062024', '17062024', '18062024', '19062024', '20062024', '21062024', '22062024', '23062024', '24062024', '25062024', '26062024', '27062024', '28062024', '29062024', '30062024', '01072024', '02072024', '03072024', '04072024', '05072024', '06072024', '07072024', '08072024', '09072024', '10072024', '11072024', '12072024', '13072024', '14072024', '15072024', '16072024', '17072024', '18072024', '19072024', '20072024', '21072024', '22072024', '23072024', '24072024', '25072024', '26072024', '27072024', '28072024', '29072024', '30072024', '31072024', '01082024', '02082024', '03082024', '04082024', '05082024', '06082024', '07082024', '08082024', '09082024', '10082024', '11082024', '12082024', '13082024', '14082024', '15082024', '16082024', '17082024', '18082024', '19082024', '20082024', '21082024', '22082024', '23082024', '24082024', '25082024', '26082024', '27082024']\n"
     ]
    }
   ],
   "source": [
    "TIKA_HOST = os.environ.get('TIKA_HOST', 'http://localhost:9998')\n",
    "\n",
    "directory = 'pdf_temp'\n",
    "index = 'hansard'\n",
    "\n",
    "def cleaning(string):\n",
    "    # normalize unicode, flatten whitespace, and repair common OCR word splits\n",
    "    string = unidecode(string).replace('\\t', ' ').replace('\\r', ' ').replace('\\n', ' ').replace('_', ' ')\n",
    "    string = string.replace(' -', '-').replace(' ANYA', 'ANYA').replace('ki ta', 'kita').replace('s aya', 'saya')\n",
    "    string = string.replace('m enjadi', 'menjadi').replace('meno lak', 'menolak')\n",
    "    try:\n",
    "        soup = BeautifulSoup(string, 'lxml')\n",
    "        string = soup.text\n",
    "    except Exception:\n",
    "        pass\n",
    "    return re.sub(r'[ ]+', ' ', string).strip()\n",
    "\n",
    "def download(url, file_name):\n",
    "    with open(file_name, 'wb') as file:\n",
    "        response = requests.get(url)\n",
    "        file.write(response.content)\n",
    "\n",
    "os.makedirs(directory, exist_ok=True)\n",
    "latest_date = datetime.strptime('2024-01-01', '%Y-%m-%d')\n",
    "\n",
    "# generate every DDMMYYYY date string from 2024-01-01 up to ~8 months later\n",
    "startDate = latest_date\n",
    "endDate = latest_date + timedelta(days=30 * 8)\n",
    "datesRange = pandas.date_range(startDate, endDate - timedelta(days=1), freq='d')\n",
    "datesRange = [d.strftime('%d%m%Y') for d in datesRange]\n",
    "print(datesRange)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "6cb663dd",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████| 240/240 [00:47<00:00, 5.05it/s]\n"
     ]
    }
   ],
   "source": [
    "for d in tqdm(datesRange):\n",
    "    filename = f'DR-{d}.pdf'\n",
    "    output_filename = os.path.join(directory, filename)\n",
    "    # skip files already downloaded; anything under 1 kB is treated as a failed fetch\n",
    "    if os.path.exists(output_filename) and os.path.getsize(output_filename) > 1000:\n",
    "        print(f'{output_filename} exists, skip.')\n",
    "        continue\n",
    "    url = f'https://www.parlimen.gov.my/files/hindex/pdf/{filename}'\n",
    "    try:\n",
    "        download(url, output_filename)\n",
    "    except Exception as e:\n",
    "        print(e)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "74ca1c07",
   "metadata": {},
   "outputs": [],
   "source": [
    "pdfs = sorted(glob(f'{directory}/*.pdf'))\n",
    "print(pdfs)\n",
    "\n",
    "print('writing hansard-temp.jsonl')\n",
    "with open('hansard-temp.jsonl', 'w') as fopen:\n",
    "    for file in tqdm(pdfs):\n",
    "\n",
    "        # ask Tika for XML output so page boundaries (<div class=\"page\">) survive\n",
    "        raw_xml = parser.from_file(file, f'{TIKA_HOST}/tika', xmlContent=True)\n",
    "        body = raw_xml['content'].split('<body>')[1].split('</body>')[0]\n",
    "        body_without_tag = body.replace('<p>', '').replace('</p>', '').replace('<div>', '').replace('</div>', '').replace('<p />', '')\n",
    "        text_pages = body_without_tag.split('<div class=\"page\">')[1:]\n",
    "\n",
    "        for i, t in enumerate(text_pages):\n",
    "            # printed page numbers appear as 'DR. <d.m.yyyy> <page>' or '<page> DR. <d.m.yyyy>'\n",
    "            r = re.findall(r'DR[\\. ]\\s*[0-3]?[0-9].[0-3]?[0-9].(?:[0-9]{2})?[0-9]{2}\\s+\\d+\\b', t)\n",
    "            r_ = re.findall(r'\\d+\\s+DR[\\. ]\\s*[0-3]?[0-9].[0-3]?[0-9].(?:[0-9]{2})?[0-9]{2}\\b', t)\n",
    "            found = True\n",
    "            if len(r):\n",
    "                no_page = r[0].split()[-1]\n",
    "            elif len(r_):\n",
    "                no_page = r_[0].split()[0]\n",
    "            else:\n",
    "                found = False\n",
    "\n",
    "            if not found:\n",
    "                continue\n",
    "\n",
    "            splitted = t.split('\\n \\n')\n",
    "            splitted = [s for s in splitted if len(s.strip()) > 1]\n",
    "            if len(splitted) < 3:\n",
    "                splitted = t.split('\\n\\n')\n",
    "            splitted = [cleaning(s) for s in splitted[1:]]\n",
    "            splitted = [s + '.' if s[-1] not in '.;:,' else s for s in splitted if len(s)]\n",
    "            splitted = ' '.join(splitted)\n",
    "            date = datetime.strptime(file, f'{directory}/DR-%d%m%Y.pdf').strftime('%Y-%m-%d')\n",
    "\n",
    "            if not len(splitted):\n",
    "                print(file, i, t)\n",
    "\n",
    "            d = {\n",
    "                'original': t,\n",
    "                'cleaned': splitted,\n",
    "                'no_page': int(no_page),\n",
    "                'actual_no_page': i + 1,\n",
    "                'date': date,\n",
    "                'url': f'https://www.parlimen.gov.my/files/hindex/{file}'.replace('/pdf_temp', '/pdf')\n",
    "            }\n",
    "            fopen.write(f'{json.dumps(d)}\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa47c63b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
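Each line the notebook appends to hansard-temp.jsonl is one page of a Hansard sitting, with the keys original, cleaned, no_page, actual_no_page, date, and url. A minimal sketch of loading that output for inspection, assuming the file sits in the working directory:

```python
import pandas as pd

# lines=True parses JSON Lines: one record per Hansard page
df = pd.read_json('hansard-temp.jsonl', lines=True)
print(df[['date', 'no_page', 'actual_no_page']].head())
print(df.iloc[0]['cleaned'][:200])  # preview of the cleaned page text
```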
hansard-2024-07-18.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d451887ceca6204a2621ea665b32a1225889a8adec164db8a965840c8a12381
size 26097658
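The committed .jsonl is only this three-line Git LFS pointer; the ~26 MB payload is resolved from LFS storage on checkout or download. A sketch of fetching the real file with huggingface_hub, where repo_id is a hypothetical placeholder for whichever dataset repo this commit belongs to:

```python
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id='huseinzol05/hansard',  # hypothetical repo id, substitute the real one
    filename='hansard-2024-07-18.jsonl',
    repo_type='dataset',
)
print(path)  # local cache path to the resolved JSONL
```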