Datasets: Alvant
Commit 12ad2b5 • Parent(s): bb097b6
add preproc notebook

preprocessing/20NG-Preprocessing.ipynb (ADDED)
@@ -0,0 +1,699 @@
Notebook contents, cell by cell:
# 20NG (Twenty Newsgroups). Preprocessing

This notebook shows an example of preprocessing the data and converting it to TopicNet's Dataset format.

* Example of a toy dataset: [test_dataset.csv](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/tests/test_data/test_dataset.csv)
* Dataset source file (with some explanations in the docstring): [dataset.py](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/cooking_machine/dataset.py)
# Contents<a id="contents"></a>

* [Loading data](#data-loading)
* [Preparing data](#data-preparation)
```python
import numpy as np
import pandas as pd
import re
import shutil
import string

from collections import Counter
from glob import glob

from sklearn import datasets
from sklearn.datasets import fetch_20newsgroups
```
```python
import nltk

from nltk.collocations import (
    BigramAssocMeasures,
    BigramCollocationFinder,
)
from nltk.corpus import (
    stopwords,
    wordnet,
)
from nltk.stem import WordNetLemmatizer
```
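The NLTK pieces used below (stop words, POS tagging, WordNet lemmatization) need their corpora and models to be available locally. A minimal setup sketch, assuming a standard NLTK installation (these download calls are not part of the original notebook):

```python
# One-time downloads for the NLTK resources used in this notebook
# (skip any that are already present in your NLTK data directory).
import nltk

nltk.download('stopwords')                   # stop word lists for stopwords.words('english')
nltk.download('wordnet')                     # WordNet data for WordNetLemmatizer
nltk.download('averaged_perceptron_tagger')  # model behind nltk.pos_tag
```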
```python
import matplotlib.pyplot as plt
%matplotlib inline

from matplotlib import cm
```
## Loading data<a id="data-loading"></a>

<div style="text-align: right">Back to <a href=#contents>Contents</a></div>
Let's download the dataset:
```python
train_20 = fetch_20newsgroups(
    subset='train',
    remove=('headers', 'footers', 'quotes'),
)
test_20 = fetch_20newsgroups(
    subset='test',
    remove=('headers', 'footers', 'quotes'),
)
```
Output:
```
Downloading 20news dataset. This may take a few minutes.
Downloading dataset from https://ndownloader.figshare.com/files/5975967 (14 MB)
```
```python
train_20.pop('DESCR')
labels = train_20.pop('target_names')

for k in train_20.keys():
    print(len(train_20[k]), k)
```
Output:
```
11314 data
11314 filenames
11314 target
```
```python
test_20.pop('DESCR')
labels_test = test_20.pop('target_names')

for k in test_20.keys():
    print(len(test_20[k]), k)
```
Output:
```
7532 data
7532 filenames
7532 target
```
## Preparing data (lemmatization, Vowpal Wabbit & TopicNet's format)<a id="data-preparation"></a>

<div style="text-align: right">Back to <a href=#contents>Contents</a></div>
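For orientation: each document will end up as a single Vowpal-Wabbit-style line with one `@`-prefixed block per modality (here `@lemmatized` and `@bigram`), where every token is followed by its in-document frequency. A schematic line (the token names here are made up):

```
doc_id |@lemmatized word:2 another_word:1 |@bigram word_pair:1
```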
Wrapping it all into .csv files:
```python
# Build an `id` for each document from the last two path components (newsgroup + file number)
train_pd = pd.DataFrame(train_20).rename(columns={'data': 'raw_text'})
# train_pd['raw_text'] = train_pd['raw_text'].apply(lambda x: x.decode('windows-1252'))
train_pd['id'] = train_pd.filenames.apply(
    lambda x: '.'.join(x.split('/')[-2:]).replace('.', '_'))

test_pd = pd.DataFrame(test_20).rename(columns={'data': 'raw_text'})
# test_pd['raw_text'] = test_pd['raw_text'].apply(lambda x: x.decode('windows-1252'))
test_pd['id'] = test_pd.filenames.apply(
    lambda x: '.'.join(x.split('/')[-2:]))  # note: test ids keep the dots, e.g. rec.autos.103343
```
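A quick illustration of what the `id` construction does (the path below is made up, but mirrors the filenames shown in the outputs further down):

```python
path = '/home/user/scikit_learn_data/20news_home/20news-bydate-train/rec.autos/102994'
print('.'.join(path.split('/')[-2:]).replace('.', '_'))  # -> rec_autos_102994
```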
It is better to exclude the following documents (see [20-newsgroups-secrets](https://github.com/Alvant/20-newsgroups-secrets) for more details).
```python
bad_names = [9976, 9977, 9978, 9979, 9980, 9981, 9982, 9983, 9984, 9985, 9986, 9987, 9988, 9990]
bad_names = [f"comp_os_ms-windows_misc_{i}" for i in bad_names]

bad_indices = train_pd.query("id in @bad_names").index
```
Below we define some functions for text preprocessing.
```python
def nltk2wn_tag(nltk_tag):
    """Map a Penn Treebank POS tag (from nltk.pos_tag) to a WordNet POS constant."""
    if nltk_tag.startswith('J'):
        return wordnet.ADJ
    elif nltk_tag.startswith('V'):
        return wordnet.VERB
    elif nltk_tag.startswith('N'):
        return wordnet.NOUN
    elif nltk_tag.startswith('R'):
        return wordnet.ADV
    else:
        return ''
```
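A small sanity check of the tag mapping together with the lemmatizer (a sketch, assuming the imports above and the NLTK resources mentioned earlier; the example sentence is made up):

```python
wnl = WordNetLemmatizer()
tagged = nltk.pos_tag(nltk.wordpunct_tokenize('the cars were running'))

print([wnl.lemmatize(word, nltk2wn_tag(pos)) if nltk2wn_tag(pos) != '' else wnl.lemmatize(word)
       for word, pos in tagged])
# expected: something like ['the', 'car', 'be', 'run']
```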
```python
# Matches e-mail-like tokens (anything containing '@'); these are stripped from the texts below
pattern = re.compile(r'\S*@\S*\s?')
```
```python
def vowpalize_sequence(sequence):
    """Turn a list of tokens into a 'token:count' string (Vowpal Wabbit bag-of-words style)."""
    word_2_frequency = Counter(sequence)

    # An empty token may remain after filtering; drop it (Counter's del is a no-op if absent)
    del word_2_frequency['']

    vw_string = ''

    for word in word_2_frequency:
        vw_string += word + ':' + str(word_2_frequency[word]) + ' '

    return vw_string


def do_vw_for_me_please(dataframe):
    bad_entries = []
    tokenized_text = []

    # Tokenize and POS-tag, stripping e-mail-like tokens and one-character tokens
    for indx, text in enumerate(dataframe['raw_text'].values):
        try:
            text = str(pattern.sub('', text))
        except TypeError:
            text = ''

        tokens = [tok for tok in nltk.wordpunct_tokenize(text.lower()) if len(tok) > 1]
        tokenized_text.append(nltk.pos_tag(tokens))

    dataframe['tokenized'] = tokenized_text

    stop = set(stopwords.words('english'))

    # Lemmatize using POS information, then drop stop words and non-alphabetic tokens
    lemmatized_text = []
    wnl = WordNetLemmatizer()

    for text in dataframe['tokenized'].values:
        lemmatized = [wnl.lemmatize(word, nltk2wn_tag(pos))
                      if nltk2wn_tag(pos) != ''
                      else wnl.lemmatize(word)
                      for word, pos in text]
        lemmatized = [word for word in lemmatized
                      if word not in stop and word.isalpha()]
        lemmatized_text.append(lemmatized)

    dataframe['lemmatized'] = lemmatized_text

    # Collocations: keep bigrams seen at least 5 times, ranked by PMI
    # (the 100 highest-scoring pairs are dropped, presumably to filter out degenerate rare-word pairs)
    bigram_measures = BigramAssocMeasures()
    finder = BigramCollocationFinder.from_documents(dataframe['lemmatized'])
    finder.apply_freq_filter(5)
    set_dict = set(finder.nbest(bigram_measures.pmi, 32100)[100:])
    documents = dataframe['lemmatized']
    bigrams = []

    for doc in documents:
        entry = ['_'.join([word_first, word_second])
                 for word_first, word_second in zip(doc[:-1], doc[1:])
                 if (word_first, word_second) in set_dict]
        bigrams.append(entry)

    dataframe['bigram'] = bigrams

    # Assemble Vowpal Wabbit lines: "<id> |@lemmatized ... |@bigram ..."
    vw_text = []

    for index, data in dataframe.iterrows():
        doc_id = data.id
        lemmatized = '@lemmatized ' + vowpalize_sequence(data.lemmatized)
        bigram = '@bigram ' + vowpalize_sequence(data.bigram)
        vw_string = ' |'.join([doc_id, lemmatized, bigram])
        vw_text.append(vw_string)

    dataframe['vw_text'] = vw_text

    print('num bad entries ', len(bad_entries))
    print(bad_entries)

    return dataframe
```
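A quick illustration of `vowpalize_sequence` on a made-up token list:

```python
print(vowpalize_sequence(['car', 'car', 'engine']))
# expected: something like 'car:2 engine:1 ' (tokens ordered by first appearance)
```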
And here are the final datasets!
Each row represents a document.
The columns `id`, `raw_text` and `vw_text` are required (see this [toy dataset](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/tests/test_data/test_dataset.csv) for an example).
```python
train_pd = do_vw_for_me_please(train_pd)
display(train_pd.head())

test_pd = do_vw_for_me_please(test_pd)
display(test_pd.head())
```
Output (abridged; the notebook prints the lines below once per call and shows both an HTML and a plain-text rendering of each DataFrame head):
```
num bad entries  0
[]
```
`train_pd.head()` and `test_pd.head()` both show the columns `raw_text`, `filenames`, `target`, `id`, `tokenized`, `lemmatized`, `bigram`, `vw_text`. For example, the first train row has id `rec_autos_102994` and a `vw_text` beginning with `rec_autos_102994 |@lemmatized wonder:1 anyone:...`, while the first test row has id `rec.autos.103343`.
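Before saving, it can be worth double-checking that the columns TopicNet's Dataset expects are actually present (a hypothetical check, not part of the original notebook):

```python
required_columns = {'id', 'raw_text', 'vw_text'}

for frame in (train_pd, test_pd):
    # report any missing required column
    assert required_columns.issubset(frame.columns), required_columns - set(frame.columns)
```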
Saving to disk (TopicNet's [Dataset](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/cooking_machine/dataset.py) can then be constructed from the saved .csv file with the text data).
```python
# The CSVs are written under /data/datasets, so create the target directory there
! mkdir -p /data/datasets/20_News_dataset

train_pd.drop(bad_indices).to_csv('/data/datasets/20_News_dataset/train_preprocessed.csv')
test_pd.to_csv('/data/datasets/20_News_dataset/test_preprocessed.csv')
```
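As a follow-up, the saved file can be wrapped into a TopicNet Dataset. A minimal sketch, assuming TopicNet is installed (variable names are arbitrary):

```python
from topicnet.cooking_machine.dataset import Dataset

# Dataset reads a .csv with at least the 'id', 'raw_text' and 'vw_text' columns
dataset = Dataset('/data/datasets/20_News_dataset/train_preprocessed.csv')
```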
Notebook metadata: Python 3 kernel, Python 3.6.9, nbformat 4 (minor 2).