{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# NLTK: Brown & Reuters\n", "\n", "https://www.nltk.org/book/ch02.html" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Contents\n", "\n", "* [Brown](#brown)\n", "* [Reuters](#reuters)" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "# General\n", "\n", "import json\n", "import pandas as pd\n", "import numpy as np\n", "import string\n", "import sys\n", "import tqdm\n", "\n", "from collections import (\n", " Counter,\n", " defaultdict,\n", ")\n", "\n", "# NLP\n", "\n", "import nltk\n", "import spacy\n", "\n", "from nltk.corpus import (\n", " brown,\n", " reuters,\n", " stopwords,\n", ")\n", "\n", "# Plots\n", "\n", "from matplotlib import pyplot as plt\n", "\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "[nltk_data] Downloading package brown to /home/alekseev/nltk_data...\n", "[nltk_data] Package brown is already up-to-date!\n", "[nltk_data] Downloading package reuters to /home/alekseev/nltk_data...\n", "[nltk_data] Package reuters is already up-to-date!\n", "[nltk_data] Downloading package stopwords to\n", "[nltk_data] /home/alekseev/nltk_data...\n", "[nltk_data] Package stopwords is already up-to-date!\n" ] }, { "data": { "text/plain": [ "True" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "nltk.download('brown')\n", "nltk.download('reuters')\n", "nltk.download('stopwords')" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: en_core_web_sm==2.2.5 from https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.5/en_core_web_sm-2.2.5.tar.gz#egg=en_core_web_sm==2.2.5 in /data/topicnet/lib/python3.6/site-packages (2.2.5)\n", "Requirement already satisfied: spacy>=2.2.2 in /data/topicnet/lib/python3.6/site-packages (from en_core_web_sm==2.2.5) (2.2.4)\n", "Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (2.0.3)\n", "Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (4.45.0)\n", "Requirement already satisfied: wasabi<1.1.0,>=0.4.0 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (0.6.0)\n", "Requirement already satisfied: thinc==7.4.0 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (7.4.0)\n", "Requirement already satisfied: srsly<1.1.0,>=1.0.2 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.0.2)\n", "Requirement already satisfied: catalogue<1.1.0,>=0.0.7 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.0.0)\n", "Requirement already satisfied: blis<0.5.0,>=0.4.0 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (0.4.1)\n", "Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.0.2)\n", "Requirement already satisfied: setuptools in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (46.1.3.post20200330)\n", "Requirement already satisfied: numpy>=1.15.0 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.18.2)\n", "Requirement already satisfied: requests<3.0.0,>=2.13.0 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (2.23.0)\n", "Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (3.0.2)\n", "Requirement already satisfied: plac<1.2.0,>=0.9.6 in /data/topicnet/lib/python3.6/site-packages (from spacy>=2.2.2->en_core_web_sm==2.2.5) (1.1.3)\n", "Requirement already satisfied: importlib-metadata>=0.20; python_version < \"3.8\" in /data/topicnet/lib/python3.6/site-packages (from catalogue<1.1.0,>=0.0.7->spacy>=2.2.2->en_core_web_sm==2.2.5) (1.6.0)\n", "Requirement already satisfied: idna<3,>=2.5 in /data/topicnet/lib/python3.6/site-packages (from requests<3.0.0,>=2.13.0->spacy>=2.2.2->en_core_web_sm==2.2.5) (2.9)\n", "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /data/topicnet/lib/python3.6/site-packages (from requests<3.0.0,>=2.13.0->spacy>=2.2.2->en_core_web_sm==2.2.5) (1.25.9)\n", "Requirement already satisfied: chardet<4,>=3.0.2 in /data/topicnet/lib/python3.6/site-packages (from requests<3.0.0,>=2.13.0->spacy>=2.2.2->en_core_web_sm==2.2.5) (3.0.4)\n", "Requirement already satisfied: certifi>=2017.4.17 in /data/topicnet/lib/python3.6/site-packages (from requests<3.0.0,>=2.13.0->spacy>=2.2.2->en_core_web_sm==2.2.5) (2020.4.5.1)\n", "Requirement already satisfied: zipp>=0.5 in /data/topicnet/lib/python3.6/site-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy>=2.2.2->en_core_web_sm==2.2.5) (3.1.0)\n", "\u001b[38;5;2m✔ Download and installation successful\u001b[0m\n", "You can now load the model via spacy.load('en_core_web_sm')\n", "\u001b[38;5;2m✔ Linking successful\u001b[0m\n", "/data/topicnet/lib/python3.6/site-packages/en_core_web_sm -->\n", "/data/topicnet/lib/python3.6/site-packages/spacy/data/en\n", "You can now load the model via spacy.load('en')\n" ] } ], "source": [ "! {sys.executable} -m spacy download en" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "# Initializing spacy 'en' model, keeping only tagger component needed for lemmatization\n", "\n", "SPACY_MODEL = spacy.load('en', disable=['parser', 'ner'])\n", "SPACY_PRON = '-PRON-'" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'the stripe bat be hang on -PRON- foot for good'" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# https://webdevblog.ru/podhody-lemmatizacii-s-primerami-v-python/\n", "\n", "\n", "test_sentence = 'The striped bats are hanging on their feet for best'\n", "\n", "# Parse the sentence using the loaded 'en' model object `nlp`\n", "doc = SPACY_MODEL(test_sentence)\n", "\n", "# Extract the lemma for each token and join\n", "' '.join([token.lemma_ for token in doc])" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "STOPWORDS = set(stopwords.words('english'))\n", "PUNCTUATION = string.punctuation" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~'" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "PUNCTUATION" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "# https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/demos/Making-Decorrelation-and-Topic-Selection-Friends.ipynb\n", "\n", "\n", "COLOR = (176/255, 0, 0)\n", "FIGSIZE = (15, 10)\n", "LINEWIDTH = 5\n", "LABELPAD = 10\n", "\n", "\n", "# https://en.wikipedia.org/wiki/Zipf%27s_law\n", "\n", "def draw_zipfs_plot(\n", " texts,\n", " low_outlier_percentile_bound=1,\n", " upper_outlier_percentile_bound=99,\n", " ax=None):\n", "\n", " counter = Counter(' '.join(texts).split())\n", " \n", " if ax is None:\n", " fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)\n", " else:\n", " fig = None\n", " \n", " frequencies = [freq for word, freq in counter.most_common()]\n", " low_outlier_bound = np.percentile(frequencies, low_outlier_percentile_bound)\n", " upper_outlier_bound = np.percentile(frequencies, upper_outlier_percentile_bound)\n", " frequencies = [freq for freq in frequencies if freq > low_outlier_bound and freq <= upper_outlier_bound]\n", " \n", " ranks = range(1, len(frequencies) + 1)\n", " \n", " ax.plot(ranks, frequencies, lw=LINEWIDTH, color=COLOR)\n", " \n", " ax.set_xlabel('Rank', labelpad=LABELPAD)\n", " ax.set_ylabel('Frequency', labelpad=LABELPAD)\n", " ax.set_title('Zipf\\'s law')\n", " \n", " if fig is not None:\n", " plt.show()\n", "\n", " return ax\n", "\n", "\n", "# https://en.wikipedia.org/wiki/Heaps%27_law\n", "\n", "def draw_heaps_plot(\n", " texts,\n", " in_symbols=True,\n", " ax=None):\n", "\n", " if in_symbols is True:\n", " lengths = [len(t) for t in texts]\n", " length_units = 'symbols'\n", " else:\n", " lengths = [len(t.split()) for t in texts]\n", " length_units = 'words'\n", "\n", " num_uniques = [len(set(t.split())) for t in texts]\n", " \n", " if ax is None:\n", " fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)\n", " else:\n", " fig = None\n", " \n", " ax.scatter(lengths, num_uniques, s=4 * LINEWIDTH ** 2, color=COLOR)\n", " \n", " ax.set_xlabel(f'Document length ({length_units})', labelpad=LABELPAD)\n", " ax.set_ylabel('Number of unique tokens', labelpad=LABELPAD)\n", " ax.set_title('Heaps\\' law')\n", " \n", " if fig is not None:\n", " plt.show()\n", "\n", " return ax\n", "\n", "\n", "def draw_document_lengths_histogram(\n", " texts,\n", " in_symbols=False,\n", " low_outlier_percentile_bound=1,\n", " upper_outlier_percentile_bound=99,\n", " bins=200,\n", " ax=None):\n", "\n", " if in_symbols is True:\n", " lengths = [len(t) for t in texts]\n", " length_units = 'symbols'\n", " else:\n", " lengths = [len(t.split()) for t in texts]\n", " length_units = 'words'\n", "\n", " low_outlier_bound = np.percentile(lengths, low_outlier_percentile_bound)\n", " upper_outlier_bound = np.percentile(lengths, upper_outlier_percentile_bound)\n", " lengths = [l for l in lengths if l > low_outlier_bound and l <= upper_outlier_bound]\n", " \n", " if ax is None:\n", " fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)\n", " else:\n", " fig = None\n", " \n", " ax.hist(lengths, bins=bins, color=COLOR)\n", " \n", " ax.set_xlabel(f'Document length ({length_units})', labelpad=LABELPAD)\n", " ax.set_ylabel('Number of documents', labelpad=LABELPAD)\n", " ax.set_title('Document lengths distribution')\n", " \n", " if fig is not None:\n", " plt.show()\n", "\n", " return ax\n", "\n", "\n", "def draw_statistics(texts, figsize=(20, 15), font_size=16):\n", " old_font_size = plt.rcParams['font.size']\n", " plt.rcParams.update({'font.size': font_size})\n", "\n", " fig, axes = plt.subplots(2, 2, figsize=figsize)\n", " \n", " draw_zipfs_plot(\n", " texts,\n", " ax=axes[0][0]\n", " )\n", " draw_document_lengths_histogram(\n", " texts,\n", " ax=axes[0][1]\n", " )\n", " draw_heaps_plot(\n", " texts,\n", " ax=axes[1][0],\n", " in_symbols=True\n", " )\n", " draw_heaps_plot(\n", " texts,\n", " ax=axes[1][1],\n", " in_symbols=False\n", " )\n", " \n", " plt.show()\n", " \n", " plt.rcParams.update({'font.size': old_font_size})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Brown\n", "\n", "