repo_name
stringlengths 7
90
| path
stringlengths 4
191
| copies
stringlengths 1
3
| size
stringlengths 4
6
| content
stringlengths 762
838k
| license
stringclasses 15
values |
---|---|---|---|---|---|
gfyoung/pandas | asv_bench/benchmarks/hash_functions.py | 2 | 4253 | import numpy as np
import pandas as pd
class IsinAlmostFullWithRandomInt:
params = [
[np.float64, np.int64, np.uint64, np.object],
range(10, 21),
]
param_names = ["dtype", "exponent"]
def setup(self, dtype, exponent):
M = 3 * 2 ** (exponent - 2)
# 0.77-the maximal share of occupied buckets
np.random.seed(42)
self.s = pd.Series(np.random.randint(0, M, M)).astype(dtype)
self.values = np.random.randint(0, M, M).astype(dtype)
self.values_outside = self.values + M
def time_isin(self, dtype, exponent):
self.s.isin(self.values)
def time_isin_outside(self, dtype, exponent):
self.s.isin(self.values_outside)
class IsinWithRandomFloat:
params = [
[np.float64, np.object],
[
1_300,
2_000,
7_000,
8_000,
70_000,
80_000,
750_000,
900_000,
],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
np.random.seed(42)
self.values = np.random.rand(M)
self.s = pd.Series(self.values).astype(dtype)
np.random.shuffle(self.values)
self.values_outside = self.values + 0.1
def time_isin(self, dtype, M):
self.s.isin(self.values)
def time_isin_outside(self, dtype, M):
self.s.isin(self.values_outside)
class IsinWithArangeSorted:
params = [
[np.float64, np.int64, np.uint64, np.object],
[
1_000,
2_000,
8_000,
100_000,
1_000_000,
],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
self.s = pd.Series(np.arange(M)).astype(dtype)
self.values = np.arange(M).astype(dtype)
def time_isin(self, dtype, M):
self.s.isin(self.values)
class IsinWithArange:
params = [
[np.float64, np.int64, np.uint64, np.object],
[
1_000,
2_000,
8_000,
],
[-2, 0, 2],
]
param_names = ["dtype", "M", "offset_factor"]
def setup(self, dtype, M, offset_factor):
offset = int(M * offset_factor)
np.random.seed(42)
tmp = pd.Series(np.random.randint(offset, M + offset, 10 ** 6))
self.s = tmp.astype(dtype)
self.values = np.arange(M).astype(dtype)
def time_isin(self, dtype, M, offset_factor):
self.s.isin(self.values)
class Float64GroupIndex:
# GH28303
def setup(self):
self.df = pd.date_range(
start="1/1/2018", end="1/2/2018", periods=10 ** 6
).to_frame()
self.group_index = np.round(self.df.index.astype(int) / 10 ** 9)
def time_groupby(self):
self.df.groupby(self.group_index).last()
class UniqueAndFactorizeArange:
params = range(4, 16)
param_names = ["exponent"]
def setup(self, exponent):
a = np.arange(10 ** 4, dtype="float64")
self.a2 = (a + 10 ** exponent).repeat(100)
def time_factorize(self, exponent):
pd.factorize(self.a2)
def time_unique(self, exponent):
pd.unique(self.a2)
class NumericSeriesIndexing:
params = [
(pd.Int64Index, pd.UInt64Index, pd.Float64Index),
(10 ** 4, 10 ** 5, 5 * 10 ** 5, 10 ** 6, 5 * 10 ** 6),
]
param_names = ["index_dtype", "N"]
def setup(self, index, N):
vals = np.array(list(range(55)) + [54] + list(range(55, N - 1)))
indices = index(vals)
self.data = pd.Series(np.arange(N), index=indices)
def time_loc_slice(self, index, N):
# trigger building of mapping
self.data.loc[:800]
class NumericSeriesIndexingShuffled:
params = [
(pd.Int64Index, pd.UInt64Index, pd.Float64Index),
(10 ** 4, 10 ** 5, 5 * 10 ** 5, 10 ** 6, 5 * 10 ** 6),
]
param_names = ["index_dtype", "N"]
def setup(self, index, N):
vals = np.array(list(range(55)) + [54] + list(range(55, N - 1)))
np.random.seed(42)
np.random.shuffle(vals)
indices = index(vals)
self.data = pd.Series(np.arange(N), index=indices)
def time_loc_slice(self, index, N):
# trigger building of mapping
self.data.loc[:800]
| bsd-3-clause |
cdegroc/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 4 | 2890 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from numpy.testing import assert_equal
from scipy.spatial import distance
from sklearn.cluster.dbscan_ import DBSCAN, dbscan
from .common import generate_clustered_data
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
"""Tests the DBSCAN algorithm with a similarity array."""
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed",
eps=eps, min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed")
labels = db.fit(D, eps=eps, min_samples=min_samples).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
"""Tests the DBSCAN algorithm with a feature vector array."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric,
eps=eps, min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric)
labels = db.fit(X, eps=eps, min_samples=min_samples).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_callable():
"""Tests the DBSCAN algorithm with a callable metric."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric,
eps=eps, min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric)
labels = db.fit(X, eps=eps, min_samples=min_samples).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
| bsd-3-clause |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/saliency_test.py | 2 | 12493 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >> https://github.com/PAIR-code/saliency
import os, time
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import saliency
from matplotlib import pylab as plt
import PIL.Image
# Boilerplate methods.
def ShowImage(im, title='', ax=None):
if ax is None:
plt.figure()
plt.axis('off')
im = ((im + 1) * 127.5).astype(np.uint8)
plt.imshow(im)
plt.title(title)
def ShowGrayscaleImage(im, title='', ax=None):
if ax is None:
plt.figure()
plt.axis('off')
plt.imshow(im, cmap=plt.cm.gray, vmin=0, vmax=1)
plt.title(title)
def ShowHeatMap(im, title, ax=None):
if ax is None:
plt.figure()
plt.axis('off')
plt.imshow(im, cmap=plt.cm.inferno)
plt.title(title)
def ShowDivergingImage(grad, title='', percentile=99, ax=None):
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
plt.axis('off')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
im = ax.imshow(grad, cmap=plt.cm.coolwarm, vmin=-1, vmax=1)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.title(title)
def LoadImage(file_path):
im = PIL.Image.open(file_path)
im = np.asarray(im)
return im / 127.5 - 1.0
# REF [site] >> https://github.com/PAIR-code/saliency/blob/master/Examples.ipynb
def simple_example():
#--------------------
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
#--------------------
# Define a model.
num_classes = 10
input_shape = (None, 28, 28, 1) # 784 = 28 * 28.
output_shape = (None, num_classes)
input_ph = tf.placeholder(tf.float32, shape=input_shape, name='input_ph')
output_ph = tf.placeholder(tf.float32, shape=output_shape, name='output_ph')
with tf.variable_scope('conv1', reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv2d(input_ph, 32, 5, activation=tf.nn.relu, name='conv')
conv1 = tf.layers.max_pooling2d(conv1, 2, 2, name='maxpool')
with tf.variable_scope('conv2', reuse=tf.AUTO_REUSE):
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu, name='conv')
conv2 = tf.layers.max_pooling2d(conv2, 2, 2, name='maxpool')
with tf.variable_scope('fc1', reuse=tf.AUTO_REUSE):
fc1 = tf.layers.flatten(conv2, name='flatten')
fc1 = tf.layers.dense(fc1, 1024, activation=tf.nn.relu, name='dense')
with tf.variable_scope('fc2', reuse=tf.AUTO_REUSE):
model_output = tf.layers.dense(fc1, num_classes, activation=tf.nn.softmax, name='dense')
#--------------------
# Train.
loss = tf.reduce_mean(-tf.reduce_sum(output_ph * tf.log(model_output), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print('Start training...')
start_time = time.time()
for _ in range(2000):
batch_xs, batch_ys = mnist.train.next_batch(512)
batch_xs = np.reshape(batch_xs, (-1,) + input_shape[1:])
sess.run(train_step, feed_dict={input_ph: batch_xs, output_ph: batch_ys})
if 0 == idx % 100: print('.', end='', flush=True)
print()
print('End training: {} secs.'.format(time.time() - start_time))
#--------------------
# Evaluate.
correct_prediction = tf.equal(tf.argmax(model_output, 1), tf.argmax(output_ph, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Start testing...')
acc = sess.run(accuracy, feed_dict={input_ph: np.reshape(mnist.test.images, (-1,) + input_shape[1:]), output_ph: mnist.test.labels})
print('Test accuracy = {}.'.format(acc))
print('End testing: {} secs.'.format(time.time() - start_time))
if acc < 0.95:
print('Failed to train...')
return
#--------------------
# Visualize.
images = np.reshape(mnist.test.images, (-1,) + input_shape[1:])
img = images[0]
minval, maxval = np.min(img), np.max(img)
img_scaled = np.squeeze((img - minval) / (maxval - minval), axis=-1)
# Construct the scalar neuron tensor.
logits = model_output
neuron_selector = tf.placeholder(tf.int32)
y = logits[0][neuron_selector]
# Construct a tensor for predictions.
prediction = tf.argmax(logits, 1)
# Make a prediction.
prediction_class = sess.run(prediction, feed_dict={input_ph: [img]})[0]
#--------------------
start_time = time.time()
saliency_obj = saliency.Occlusion(sess.graph, sess, y, input_ph)
print('Occlusion: {} secs.'.format(time.time() - start_time))
# NOTE [info] >> An error exists in GetMask() of ${Saliency_HOME}/saliency/occlusion.py.
# <before>
# occlusion_window = np.array([size, size, x_value.shape[2]])
# occlusion_window.fill(value)
# <after>
# occlusion_window = np.full([size, size, x_value.shape[2]], value)
mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
mask_gray = saliency.VisualizeImageGrayscale(mask_3d)
mask_div = saliency.VisualizeImageDiverging(mask_3d)
fig = plt.figure()
ax = plt.subplot(1, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(1, 3, 2)
ax.imshow(mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Grayscale')
ax = plt.subplot(1, 3, 3)
ax.imshow(mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Diverging')
fig.suptitle('Occlusion', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_occlusion.png')
plt.show()
#--------------------
start_time = time.time()
conv_layer = sess.graph.get_tensor_by_name('conv2/conv/BiasAdd:0')
saliency_obj = saliency.GradCam(sess.graph, sess, y, input_ph, conv_layer)
print('GradCam: {} secs.'.format(time.time() - start_time))
mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
mask_gray = saliency.VisualizeImageGrayscale(mask_3d)
mask_div = saliency.VisualizeImageDiverging(mask_3d)
fig = plt.figure()
ax = plt.subplot(1, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(1, 3, 2)
ax.imshow(mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Grayscale')
ax = plt.subplot(1, 3, 3)
ax.imshow(mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Diverging')
fig.suptitle('Grad-CAM', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_gradcam.png')
plt.show()
#--------------------
start_time = time.time()
saliency_obj = saliency.GradientSaliency(sess.graph, sess, y, input_ph)
print('GradientSaliency: {} secs.'.format(time.time() - start_time))
vanilla_mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
smoothgrad_mask_3d = saliency_obj.GetSmoothedMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
vanilla_mask_gray = saliency.VisualizeImageGrayscale(vanilla_mask_3d)
smoothgrad_mask_gray = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)
vanilla_mask_div = saliency.VisualizeImageDiverging(vanilla_mask_3d)
smoothgrad_mask_div = saliency.VisualizeImageDiverging(smoothgrad_mask_3d)
fig = plt.figure()
ax = plt.subplot(2, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(2, 3, 2)
ax.imshow(vanilla_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Grayscale')
ax = plt.subplot(2, 3, 3)
ax.imshow(smoothgrad_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Grayscale')
ax = plt.subplot(2, 3, 5)
ax.imshow(vanilla_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Diverging')
ax = plt.subplot(2, 3, 6)
ax.imshow(smoothgrad_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Diverging')
fig.suptitle('Gradient Saliency', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_gradientsaliency.png')
plt.show()
#--------------------
start_time = time.time()
saliency_obj = saliency.GuidedBackprop(sess.graph, sess, y, input_ph)
print('GuidedBackprop: {} secs.'.format(time.time() - start_time))
vanilla_mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
smoothgrad_mask_3d = saliency_obj.GetSmoothedMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
vanilla_mask_gray = saliency.VisualizeImageGrayscale(vanilla_mask_3d)
smoothgrad_mask_gray = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)
vanilla_mask_div = saliency.VisualizeImageDiverging(vanilla_mask_3d)
smoothgrad_mask_div = saliency.VisualizeImageDiverging(smoothgrad_mask_3d)
fig = plt.figure()
ax = plt.subplot(2, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(2, 3, 2)
ax.imshow(vanilla_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Grayscale')
ax = plt.subplot(2, 3, 3)
ax.imshow(smoothgrad_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Grayscale')
ax = plt.subplot(2, 3, 4)
ax.imshow(vanilla_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Diverging')
ax = plt.subplot(2, 3, 5)
ax.imshow(smoothgrad_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Diverging')
fig.suptitle('Guided Backprop', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_guidedbackprop.png')
plt.show()
#--------------------
start_time = time.time()
saliency_obj = saliency.IntegratedGradients(sess.graph, sess, y, input_ph)
print('IntegratedGradients: {} secs.'.format(time.time() - start_time))
vanilla_mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
smoothgrad_mask_3d = saliency_obj.GetSmoothedMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
vanilla_mask_gray = saliency.VisualizeImageGrayscale(vanilla_mask_3d)
smoothgrad_mask_gray = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)
vanilla_mask_div = saliency.VisualizeImageDiverging(vanilla_mask_3d)
smoothgrad_mask_div = saliency.VisualizeImageDiverging(smoothgrad_mask_3d)
fig = plt.figure()
ax = plt.subplot(2, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(2, 3, 2)
ax.imshow(vanilla_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Grayscale')
ax = plt.subplot(2, 3, 3)
ax.imshow(smoothgrad_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Grayscale')
ax = plt.subplot(2, 3, 4)
ax.imshow(vanilla_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Diverging')
ax = plt.subplot(2, 3, 5)
ax.imshow(smoothgrad_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Diverging')
fig.suptitle('Integrated Gradients', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_integratedgradients.png')
plt.show()
#--------------------
start_time = time.time()
xrai_obj = saliency.XRAI(sess.graph, sess, y, input_ph)
print('XRAI: {} secs.'.format(time.time() - start_time))
if True:
xrai_attributions = xrai_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
else:
# Create XRAIParameters and set the algorithm to fast mode which will produce an approximate result.
xrai_params = saliency.XRAIParameters()
xrai_params.algorithm = 'fast'
xrai_attributions_fast = xrai_obj.GetMask(img, feed_dict={neuron_selector: prediction_class}, extra_parameters=xrai_params)
# Show most salient 30% of the image.
mask = xrai_attributions > np.percentile(xrai_attributions, 70)
img_masked = img_scaled.copy()
img_masked[~mask] = 0
fig = plt.figure()
ax = plt.subplot(1, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(1, 3, 2)
ax.imshow(xrai_attributions, cmap=plt.cm.inferno)
ax.axis('off')
ax.set_title('XRAI Attributions')
ax = plt.subplot(1, 3, 3)
ax.imshow(img_masked, cmap=plt.cm.gray)
ax.axis('off')
ax.set_title('Masked Input')
fig.suptitle('XRAI', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_xrai.png')
plt.show()
def main():
simple_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
xbenjox/CryptoTrade | mainui.py | 1 | 21685 | import threading
import datetime
import time
from collections import Counter
from tkinter import *
from Cryptsy import Cryptsy
from CoinDesk import CoinDesk
from chartui import ChartUI
from orderbookui import TradeHistUI
from capitalui import CapitalUI
from balancesui import BalanceUI
from marketoverviewui import MarketOverviewUI
from settingsui import SettingsUI
from dataui import DataUI
import matplotlib.finance
from finindicator import FinStrategy
import xml.etree.ElementTree as ET
class MainFrame(Frame):
BIG_FONT = 12
SMALL_FONT = 8
pubKey = ""
privKey = ""
markets = [473, 120, 3, 454, 132, 155]
c = NONE
lblPointsRSI = NONE
lblZiftPrice = None
strBTCValue = NONE
def __init__(self, master=None):
Frame.__init__(self, master)
self.finish = FALSE
self.master.title("Cryto Currency Trader.")
self.pack()
self.createWidgets()
# Load Cryptsy Keys
try:
self.tree = ET.parse('Data/settings.xml')
root = self.tree.getroot()
for keys in root.findall('keys'):
pubKey = keys.find('public').text
privKey = keys.find('private').text
except FileNotFoundError:
print("Settings File Not Found!!!")
self.c = Cryptsy(str(pubKey), str(privKey))
self.cd = CoinDesk()
self.btcPrice = self.cd.getPrice()
self.fs = FinStrategy()
# Get Currencies
try:
self.currencies = self.c.currencies()
#print("Currencies: " + str(self.currencies))
except:
print("Currency Exception.")
# Get market data, including last trade prices
try:
self.marketData = self.c.markets()
self.last_trade_prices = {}
for market in self.marketData['data']:
self.last_trade_prices[market['label']] = market['last_trade']['price']
if market['id'] == '473':
#print(market['24hr'])
ziftLastTrade = market['last_trade']['price']
self.lblZiftPrice['text'] = "{:.8f}".format(ziftLastTrade)
elif market['id'] == '120':
pointsLastTrade = "{:.8f}".format(market['last_trade']['price'])
self.lblPointsPrice['text'] = pointsLastTrade
elif market['id'] == '3':
ltcLastTrade = "{:.8f}".format(market['last_trade']['price'])
self.lblLTCPrice['text'] = ltcLastTrade
elif market['id'] == '454':
xrpLastTrade = "{:.8f}".format(market['last_trade']['price'])
self.lblXRPPrice['text'] = xrpLastTrade
elif market['id'] == '132':
dogeLastTrade = "{:.8f}".format(market['last_trade']['price'])
self.lblDOGPrice['text'] = dogeLastTrade
elif market['id'] == '119':
dshLastTrade = "{:.8f}".format(market['last_trade']['price'])
self.lblDSHPrice['text'] = dshLastTrade
except KeyError:
print("No Market Data")
# Get Balances
try:
self.balances = self.c.balances()
availableBalance = self.balances['data']['available']
#print(availableBalance)
heldBalance = self.balances['data']['held']
#print("Available Balances: ")
#print(availableBalance)
# Calculate Gross Balances
self.gross_balances = Counter()
self.gross_balances.update(availableBalance)
self.gross_balances.update(heldBalance)
ziftValue = availableBalance['275'] * ziftLastTrade
pointsValue = availableBalance['89'] * float(pointsLastTrade)
dogeValue = self.gross_balances['94'] * float(dogeLastTrade)
ltcValue = self.gross_balances['2'] * float(ltcLastTrade)
xrpValue = self.gross_balances['240'] * float(xrpLastTrade)
self.lblBalBTC["text"] = "Bitcoin: "
self.lblVolBTC["text"] = str(availableBalance['3'])
self.lblValBTC["text"] = str(availableBalance['3'])
self.lblInvBTC["text"] = str(availableBalance['3'] * self.fs.risk)
self.lblBalXRP["text"] = "Ripple: "
self.lblVolXRP["text"] = str(availableBalance['240'])
self.lblValXRP["text"] = str(xrpValue)
self.lblBalLTC["text"] = "Litecoin: "
self.lblVolLTC["text"] = str(availableBalance['2'])
self.lblValLTC["text"] = str(ltcValue)
self.lblBalDSH["text"] = "Dashcoin: "
self.lblVolDSH["text"] = str(availableBalance['2'])
self.lblValDSH["text"] = str(availableBalance['2'])
self.lblBalDOG["text"] = "Dogecoin: "
self.lblVolDOG["text"] = str(self.gross_balances['94'])
self.lblValDOG["text"] = str(dogeValue)
self.lblBalZift["text"] = "ZiftrCoin: "
self.lblVolZift['text'] = str(availableBalance['275'])
self.lblValZift['text'] = str(ziftValue)
self.lblBalPoints["text"] = "Points: "
self.lblVolPoints['text'] = str(availableBalance['89'])
self.lblValPoints['text'] = str(pointsValue)
self.lblBTCValue['text'] = str(self.btcPrice)
self.lblTotalBal["text"] = "{:.4f}".format(availableBalance['3'] + ziftValue + pointsValue + dogeValue + ltcValue + xrpValue) + " BTC"
self.lblTotalVal["text"] = "{:.2f}".format((availableBalance['3'] + ziftValue + pointsValue + dogeValue + ltcValue + xrpValue) * self.btcPrice) + " GBP"
#self.updateThread = threading.Thread(target= self.update)
#self.updateThread.start()
except KeyError:
print("No Balance Data")
return
def createWidgets(self):
# Menu
menubar = Menu(self)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Exit", command=self.quit)
menubar.add_cascade(label="File", menu=filemenu)
settingsmenu = Menu(menubar, tearoff=0)
settingsmenu.add_command(label="Settings...", command=self.settings_gui)
menubar.add_cascade(label="Settings", menu=settingsmenu)
self.master.config(menu=menubar)
# Status Frame
self.statusFrame()
# Markets
self.coins_marketFrame()
# Market Overview Chart
self.btnMarketOverview = Button(self)
self.btnMarketOverview['text'] = "Overview"
self.btnMarketOverview['command'] = self.marketOverview
self.btnMarketOverview.grid({"row": "1", "column":"3", "columnspan":"1"})
# Balances
self.coins_balFrame()
self.btnBalanceDetail = Button(self)
self.btnBalanceDetail['text'] = "Balance Detail"
self.btnBalanceDetail['command'] = self.balanceDetail
self.btnBalanceDetail.grid({"row": "20", "column":"3", "columnspan":"1"})
# Values
self.total_balFrame()
# Update Button
self.btnUpdate = Button(self)
self.btnUpdate["text"] = "Update"
self.btnUpdate["command"] = self.update
self.btnUpdate.grid({"row": "50", "column": "0"})
# Capital Button
self.btnCapital = Button(self)
self.btnCapital["text"] = "Capital"
self.btnCapital["command"] = self.Capital
self.btnCapital.grid({"row": "50", "column": "1"})
# Data Button
self.btnData = Button(self)
self.btnData["text"] = "Data"
self.btnData["command"] = self.Data
self.btnData.grid({"row": "50", "column": "2"})
# Quit Button
self.QUIT = Button(self)
self.QUIT["text"] = "Quit"
self.QUIT["fg"] = "red"
self.QUIT["command"] = self.exit
self.QUIT.grid({"row": "50", "column":"3"})
return
def statusFrame(self):
self.statusLblFrame = LabelFrame(self)
self.statusLblFrame["text"] = "API Status"
self.statusLblFrame.grid({"row": "0", "column":"0", "columnspan":"3"})
self.lblCrypsyAPI = Label(self.statusLblFrame)
self.lblCrypsyAPI["text"] = ""
self.lblCrypsyAPI.grid({"row": "0", "column":"0"})
return
def coins_marketFrame(self):
self.marketsLblFrame = LabelFrame(self)
self.marketsLblFrame["text"] = "Markets"
self.marketsLblFrame.grid({"row": "1", "column":"0", "columnspan":"2"})
self.lblXRP = Label(self.marketsLblFrame)
self.lblXRP["text"] = "Ripple"
self.lblXRP.grid({"row": "0"})
self.lblXRPPrice = Label(self.marketsLblFrame)
self.lblXRPPrice["text"] = "Price: xx"
self.lblXRPPrice.grid({"row": "0", "column":"1"})
self.btnXRPChart = Button(self.marketsLblFrame)
self.btnXRPChart["text"] = "Chart"
self.btnXRPChart["command"] = lambda: self.Chart(454)
self.btnXRPChart.grid({"row": "0", "column":"2"})
self.btnXRPOrders = Button(self.marketsLblFrame)
self.btnXRPOrders["text"] = "Order Book"
self.btnXRPOrders["command"] = lambda: self.OrderBook(275, 454)
self.btnXRPOrders.grid({"row": "0", "column":"3"})
self.lblLTC = Label(self.marketsLblFrame)
self.lblLTC["text"] = "Litecoin"
self.lblLTC.grid({"row": "1"})
self.lblLTCPrice = Label(self.marketsLblFrame)
self.lblLTCPrice["text"] = "Price: xx"
self.lblLTCPrice.grid({"row": "1", "column":"1"})
self.btnLTCChart = Button(self.marketsLblFrame)
self.btnLTCChart["text"] = "Chart"
self.btnLTCChart["command"] = lambda: self.Chart(3)
self.btnLTCChart.grid({"row": "1", "column":"2"})
self.btnLTCOrders = Button(self.marketsLblFrame)
self.btnLTCOrders["text"] = "Order Book"
self.btnLTCOrders["command"] = lambda: self.OrderBook(275, 3)
self.btnLTCOrders.grid({"row": "1", "column":"3"})
self.lblDSH = Label(self.marketsLblFrame)
self.lblDSH["text"] = "Dashcoin"
self.lblDSH.grid({"row": "2"})
self.lblDSHPrice = Label(self.marketsLblFrame)
self.lblDSHPrice["text"] = "Price: xx"
self.lblDSHPrice.grid({"row": "2", "column":"1"})
self.btnDSHChart = Button(self.marketsLblFrame)
self.btnDSHChart["text"] = "Chart"
self.btnDSHChart["command"] = lambda: self.Chart(119)
self.btnDSHChart.grid({"row": "2", "column":"2"})
self.btnDSHOrders = Button(self.marketsLblFrame)
self.btnDSHOrders["text"] = "Order Book"
self.btnDSHOrders["command"] = lambda: self.OrderBook(275, 119)
self.btnDSHOrders.grid({"row": "2", "column":"3"})
self.lblDOG = Label(self.marketsLblFrame)
self.lblDOG["text"] = "Dogecoin"
self.lblDOG.grid({"row": "3"})
self.lblDOGPrice = Label(self.marketsLblFrame)
self.lblDOGPrice["text"] = "Price: xx"
self.lblDOGPrice.grid({"row": "3", "column":"1"})
self.btnDOGChart = Button(self.marketsLblFrame)
self.btnDOGChart["text"] = "Chart"
self.btnDOGChart["command"] = lambda: self.Chart(132)
self.btnDOGChart.grid({"row": "3", "column":"2"})
self.btnDOGOrders = Button(self.marketsLblFrame)
self.btnDOGOrders["text"] = "Order Book"
self.btnDOGOrders["command"] = lambda: self.OrderBook(275, 132)
self.btnDOGOrders.grid({"row": "3", "column":"3"})
self.lblZift = Label(self.marketsLblFrame)
self.lblZift["text"] = "ZiftrCoin"
self.lblZift.grid({"row": "4"})
self.lblZiftPrice = Label(self.marketsLblFrame)
self.lblZiftPrice["text"] = "Price: xx"
self.lblZiftPrice.grid({"row": "4", "column":"1"})
self.btnZiftChart = Button(self.marketsLblFrame)
self.btnZiftChart["text"] = "Chart"
self.btnZiftChart["command"] = lambda: self.Chart(473)
self.btnZiftChart.grid({"row": "4", "column":"2"})
self.btnZiftOrders = Button(self.marketsLblFrame)
self.btnZiftOrders["text"] = "Order Book"
self.btnZiftOrders["command"] = lambda: self.OrderBook(275, 473)
self.btnZiftOrders.grid({"row": "4", "column":"3"})
self.lblPoints = Label(self.marketsLblFrame)
self.lblPoints["text"] = "Points"
self.lblPoints.grid({"row": "5"})
self.lblPointsPrice = Label(self.marketsLblFrame)
self.lblPointsPrice["text"] = "Price: xx"
self.lblPointsPrice.grid({"row": "5", "column":"1"})
self.btnPointsChart = Button(self.marketsLblFrame)
self.btnPointsChart["text"] = "Chart"
self.btnPointsChart["command"] = lambda: self.Chart(120)
self.btnPointsChart.grid({"row": "5", "column":"2"})
self.btnPointsOrders = Button(self.marketsLblFrame)
self.btnPointsOrders["text"] = "Order Book"
self.btnPointsOrders["command"] = lambda: self.OrderBook(275, 120)
self.btnPointsOrders.grid({"row": "5", "column":"3"})
return
def total_balFrame(self):
self.balLblFrame = LabelFrame(self)
self.balLblFrame["text"] = "Total Value"
self.balLblFrame.grid({"row": "30", "column":"0", "columnspan":"4"})
self.lblTotalBal = Label(self.balLblFrame)
self.lblTotalBal["text"] = ""
self.lblTotalBal.grid({"row": "0", "column":"1"})
self.lblBTCValue = Label(self.balLblFrame)
self.lblBTCValue['text'] = '0'
self.lblBTCValue.grid({"row": "1", "column":"0"})
self.lblTotalVal = Label(self.balLblFrame)
self.lblTotalVal["text"] = ""
self.lblTotalVal["fg"] = "green"
self.lblTotalVal.grid({"row": "1", "column":"1"})
return
def coins_balFrame(self):
self.coinsbalLblFrame = LabelFrame(self)
self.coinsbalLblFrame["text"] = "Coin Balances"
self.coinsbalLblFrame["font"] = self.BIG_FONT
self.coinsbalLblFrame.grid({"row": "20", "column": "0", "columnspan":"3"})
self.lblHdrCurrency = Label(self.coinsbalLblFrame)
self.lblHdrCurrency["text"] = "Currency"
self.lblHdrCurrency.grid({"row": "1", "column":"0"})
self.lblHdrVolume = Label(self.coinsbalLblFrame)
self.lblHdrVolume["text"] = "Volume"
self.lblHdrVolume.grid({"row": "1", "column":"1"})
self.lblHdrValue = Label(self.coinsbalLblFrame)
self.lblHdrValue["text"] = "Value"
self.lblHdrValue.grid({"row": "1", "column":"2"})
self.lblHdrInvestable = Label(self.coinsbalLblFrame)
self.lblHdrInvestable["text"] = "Investable"
self.lblHdrInvestable.grid({"row": "1", "column":"3"})
self.lblHdrInvestable = Label(self.coinsbalLblFrame)
self.lblHdrInvestable["text"] = "Investable"
self.lblHdrInvestable.grid({"row": "1", "column":"3"})
self.lblBalBTC = Label(self.coinsbalLblFrame)
self.lblBalBTC["text"] = "Bitcoins"
self.lblBalBTC.grid({"row": "2", "column":"0"})
self.lblVolBTC = Label(self.coinsbalLblFrame)
self.lblVolBTC["text"] = ""
self.lblVolBTC.grid({"row": "2", "column":"1"})
self.lblValBTC = Label(self.coinsbalLblFrame)
self.lblValBTC["text"] = ""
self.lblValBTC.grid({"row": "2", "column":"2"})
self.lblInvBTC = Label(self.coinsbalLblFrame)
self.lblInvBTC["text"] = ""
self.lblInvBTC.grid({"row": "2", "column":"3"})
self.lblBalXRP = Label(self.coinsbalLblFrame)
self.lblBalXRP["text"] = "Ripple"
self.lblBalXRP.grid({"row": "3", "column":"0"})
self.lblVolXRP = Label(self.coinsbalLblFrame)
self.lblVolXRP["text"] = ""
self.lblVolXRP.grid({"row": "3", "column":"1"})
self.lblValXRP = Label(self.coinsbalLblFrame)
self.lblValXRP["text"] = ""
self.lblValXRP.grid({"row": "3", "column":"2"})
self.lblBalLTC = Label(self.coinsbalLblFrame)
self.lblBalLTC["text"] = "Litecoins"
self.lblBalLTC.grid({"row": "4", "column":"0"})
self.lblVolLTC = Label(self.coinsbalLblFrame)
self.lblVolLTC["text"] = ""
self.lblVolLTC.grid({"row": "4", "column":"1"})
self.lblValLTC = Label(self.coinsbalLblFrame)
self.lblValLTC["text"] = ""
self.lblValLTC.grid({"row": "4", "column":"2"})
self.lblBalDSH = Label(self.coinsbalLblFrame)
self.lblBalDSH["text"] = "Bitshares"
self.lblBalDSH.grid({"row": "5", "column":"0"})
self.lblVolDSH = Label(self.coinsbalLblFrame)
self.lblVolDSH["text"] = ""
self.lblVolDSH.grid({"row": "5", "column":"1"})
self.lblValDSH = Label(self.coinsbalLblFrame)
self.lblValDSH["text"] = ""
self.lblValDSH.grid({"row": "5", "column":"2"})
self.lblBalDOG = Label(self.coinsbalLblFrame)
self.lblBalDOG["text"] = "Dogecoins"
self.lblBalDOG.grid({"row": "6", "column":"0"})
self.lblVolDOG = Label(self.coinsbalLblFrame)
self.lblVolDOG["text"] = ""
self.lblVolDOG.grid({"row": "6", "column":"1"})
self.lblValDOG = Label(self.coinsbalLblFrame)
self.lblValDOG["text"] = ""
self.lblValDOG.grid({"row": "6", "column":"2"})
self.lblBalZift = Label(self.coinsbalLblFrame)
self.lblBalZift["text"] = "ZiftrCoin"
self.lblBalZift.grid({"row": "7", "column":"0"})
self.lblVolZift = Label(self.coinsbalLblFrame)
self.lblVolZift["text"] = ""
self.lblVolZift.grid({"row": "7", "column":"1"})
self.lblValZift = Label(self.coinsbalLblFrame)
self.lblValZift["text"] = ""
self.lblValZift.grid({"row": "7", "column":"2"})
self.lblBalPoints = Label(self.coinsbalLblFrame)
self.lblBalPoints["text"] = "Points"
self.lblBalPoints.grid({"row": "8", "column":"0"})
self.lblVolPoints = Label(self.coinsbalLblFrame)
self.lblVolPoints["text"] = ""
self.lblVolPoints.grid({"row": "8", "column":"1"})
self.lblValPoints = Label(self.coinsbalLblFrame)
self.lblValPoints["text"] = ""
self.lblValPoints.grid({"row": "8", "column":"2"})
return
def Chart(self, mid):
for m in self.marketData['data']:
if m['id'] == str(mid):
title = m['label']
chart = ChartUI(self,mid,self.c, self.fs, title)
return
def marketOverview(self):
chart = MarketOverviewUI(self, self.c, self.markets)
return
def balanceDetail(self):
balUI = BalanceUI(self, self.gross_balances, self.currencies)
return
def OrderBook(self, cid, mid):
trad_hist = TradeHistUI(self, cid, mid,self.c)
return
def Capital(self):
capital = CapitalUI(self, self.c, self.last_trade_prices)
return
def Data(self):
data = DataUI(self, self.c)
return
def update(self):
#while not self.finish:
print("Updating Market Data")
marketData = self.c.markets()
for market in marketData['data']:
#print(market)
if market['id'] == '473':
self.lblZiftPrice['text'] = "{:.8f}".format(market['last_trade']['price'])
elif market['id'] == '120':
self.lblPointsPrice['text'] = "{:.8f}".format(market['last_trade']['price'])
elif market['id'] == '132':
self.lblDOGPrice['text'] = "{:.8f}".format(market['last_trade']['price'])
#time.sleep(10)
return
def exit(self):
self.finish = TRUE
#self.updateThread.join()
self.quit()
return
def getClosePrices(self, mid):
ohlc = self.c.market_ohlc(mid, start=0, stop=time.time(), interval="hour", limit=14)
closePrices = []
for price in reversed(ohlc['data']):
closePrices.append(price['close'])
return closePrices
def getSpread(self, mid):
spread = 0
bid = self.c.market_orderbook(mid, 1, "buy")
ask = self.c.market_orderbook(mid, 1, "sell")
spread = ask - bid
return spread
def calcRSI(self, cp):
gains = 0
iGains = 0
losses = 0
iLosses = 0
for i in range(1, len(cp)):
if cp[i] > cp[i-1]:
gains += cp[i] - cp[i-1]
iGains += 1
elif cp[i] < cp[i-1]:
losses += cp[i-1] - cp[i]
iLosses += 1
rs = (gains / iGains) / (losses / iLosses)
RSI = 100 - 100/(1+rs)
return RSI
def calcEMA(self, curPrice, periods, prevEMA):
k = 2/(periods + 1)
(curPrice * k) + (prevEMA * (1- k))
return
def settings_gui(self):
settings = SettingsUI(self)
return | lgpl-3.0 |
mattgiguere/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
Selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
CforED/Machine-Learning | sklearn/decomposition/__init__.py | 76 | 1490 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
joernhees/scikit-learn | sklearn/feature_extraction/text.py | 4 | 52183 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space."""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp select tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = _make_int_array()
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = np.asarray(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = frombuffer_empty(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf of term t is
tf-idf(d, t) = tf(t) * idf(d, t), and the idf is computed as
idf(d, t) = log [ n / df(d, t) ] + 1 (if ``smooth_idf=False``),
where n is the total number of documents and df(d, t) is the
document frequency; the document frequency is the number of documents d
that contain term t. The effect of adding "1" to the idf in the equation
above is that terms with zero idf, i.e., terms that occur in all documents
in a training set, will not be entirely ignored.
(Note that the idf formula above differs from the standard
textbook notation that defines the idf as
idf(d, t) = log [ n / (df(d, t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(d, t) = log [ (1 + n) / (1 + df(d, t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
n=n_features, format='csr')
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
silky/sms-tools | software/models_interface/stft_function.py | 2 | 2796 | # function to call the main analysis/synthesis functions in software/models/stft.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import stft as STFT
def main(inputFile = '../../sounds/piano.wav', window = 'hamming', M = 1024, N = 1024, H = 512):
"""
analysis/synthesis using the STFT
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (choice of rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, bigger or equal than M)
H: hop size (at least 1/2 of analysis window size to have good overlap-add)
"""
# read input sound (monophonic with sampling rate of 44100)
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# compute the magnitude and phase spectrogram
mX, pX = STFT.stftAnal(x, fs, w, N, H)
# perform the inverse stft
y = STFT.stftSynth(mX, pX, M, H)
# output sound file (monophonic with sampling rate of 44100)
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_stft.wav'
# write the sound resulting from the inverse stft
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot magnitude spectrogram
plt.subplot(4,1,2)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:N*maxplotfreq/fs+1]))
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.title('magnitude spectrogram')
plt.autoscale(tight=True)
# plot the phase spectrogram
plt.subplot(4,1,3)
numFrames = int(pX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(np.diff(pX[:,:N*maxplotfreq/fs+1],axis=1)))
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.title('phase spectrogram (derivative)')
plt.autoscale(tight=True)
# plot the output sound
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
if __name__ == "__main__":
main()
| agpl-3.0 |
0x0all/scikit-learn | sklearn/datasets/tests/test_base.py | 39 | 5607 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
| bsd-3-clause |
altair-viz/altair | altair/vegalite/v4/api.py | 1 | 87022 | import warnings
import hashlib
import io
import json
import jsonschema
import pandas as pd
from toolz.curried import pipe as _pipe
from .schema import core, channels, mixins, Undefined, SCHEMA_URL
from .data import data_transformers
from ... import utils, expr
from .display import renderers, VEGALITE_VERSION, VEGAEMBED_VERSION, VEGA_VERSION
from .theme import themes
# ------------------------------------------------------------------------
# Data Utilities
def _dataset_name(values):
"""Generate a unique hash of the data
Parameters
----------
values : list or dict
A list/dict representation of data values.
Returns
-------
name : string
A unique name generated from the hash of the values.
"""
if isinstance(values, core.InlineDataset):
values = values.to_dict()
values_json = json.dumps(values, sort_keys=True)
hsh = hashlib.md5(values_json.encode()).hexdigest()
return "data-" + hsh
def _consolidate_data(data, context):
"""If data is specified inline, then move it to context['datasets']
This function will modify context in-place, and return a new version of data
"""
values = Undefined
kwds = {}
if isinstance(data, core.InlineData):
if data.name is Undefined and data.values is not Undefined:
values = data.values
kwds = {"format": data.format}
elif isinstance(data, dict):
if "name" not in data and "values" in data:
values = data["values"]
kwds = {k: v for k, v in data.items() if k != "values"}
if values is not Undefined:
name = _dataset_name(values)
data = core.NamedData(name=name, **kwds)
context.setdefault("datasets", {})[name] = values
return data
def _prepare_data(data, context=None):
"""Convert input data to data for use within schema
Parameters
----------
data :
The input dataset in the form of a DataFrame, dictionary, altair data
object, or other type that is recognized by the data transformers.
context : dict (optional)
The to_dict context in which the data is being prepared. This is used
to keep track of information that needs to be passed up and down the
recursive serialization routine, such as global named datasets.
"""
if data is Undefined:
return data
# convert dataframes or objects with __geo_interface__ to dict
if isinstance(data, pd.DataFrame) or hasattr(data, "__geo_interface__"):
data = _pipe(data, data_transformers.get())
# convert string input to a URLData
if isinstance(data, str):
data = core.UrlData(data)
# consolidate inline data to top-level datasets
if context is not None and data_transformers.consolidate_datasets:
data = _consolidate_data(data, context)
# if data is still not a recognized type, then return
if not isinstance(data, (dict, core.Data)):
warnings.warn("data of type {} not recognized".format(type(data)))
return data
# ------------------------------------------------------------------------
# Aliases & specializations
Bin = core.BinParams
@utils.use_signature(core.LookupData)
class LookupData(core.LookupData):
def to_dict(self, *args, **kwargs):
"""Convert the chart to a dictionary suitable for JSON export"""
copy = self.copy(deep=False)
copy.data = _prepare_data(copy.data, kwargs.get("context"))
return super(LookupData, copy).to_dict(*args, **kwargs)
@utils.use_signature(core.FacetMapping)
class FacetMapping(core.FacetMapping):
_class_is_valid_at_instantiation = False
def to_dict(self, *args, **kwargs):
copy = self.copy(deep=False)
context = kwargs.get("context", {})
data = context.get("data", None)
if isinstance(self.row, str):
copy.row = core.FacetFieldDef(**utils.parse_shorthand(self.row, data))
if isinstance(self.column, str):
copy.column = core.FacetFieldDef(**utils.parse_shorthand(self.column, data))
return super(FacetMapping, copy).to_dict(*args, **kwargs)
# ------------------------------------------------------------------------
# Encoding will contain channel objects that aren't valid at instantiation
core.FacetedEncoding._class_is_valid_at_instantiation = False
# ------------------------------------------------------------------------
# These are parameters that are valid at the top level, but are not valid
# for specs that are within a composite chart
# (layer, hconcat, vconcat, facet, repeat)
TOPLEVEL_ONLY_KEYS = {"background", "config", "autosize", "padding", "$schema"}
def _get_channels_mapping():
mapping = {}
for attr in dir(channels):
cls = getattr(channels, attr)
if isinstance(cls, type) and issubclass(cls, core.SchemaBase):
mapping[cls] = attr.replace("Value", "").lower()
return mapping
# -------------------------------------------------------------------------
# Tools for working with selections
class Selection(object):
"""A Selection object"""
_counter = 0
@classmethod
def _get_name(cls):
cls._counter += 1
return "selector{:03d}".format(cls._counter)
def __init__(self, name, selection):
if name is None:
name = self._get_name()
self.name = name
self.selection = selection
def __repr__(self):
return "Selection({0!r}, {1})".format(self.name, self.selection)
def ref(self):
return self.to_dict()
def to_dict(self):
return {
"selection": self.name.to_dict()
if hasattr(self.name, "to_dict")
else self.name
}
def __invert__(self):
return Selection(core.SelectionNot(**{"not": self.name}), self.selection)
def __and__(self, other):
if isinstance(other, Selection):
other = other.name
return Selection(
core.SelectionAnd(**{"and": [self.name, other]}), self.selection
)
def __or__(self, other):
if isinstance(other, Selection):
other = other.name
return Selection(core.SelectionOr(**{"or": [self.name, other]}), self.selection)
def __getattr__(self, field_name):
if field_name.startswith("__") and field_name.endswith("__"):
raise AttributeError(field_name)
return expr.core.GetAttrExpression(self.name, field_name)
def __getitem__(self, field_name):
return expr.core.GetItemExpression(self.name, field_name)
# ------------------------------------------------------------------------
# Top-Level Functions
def value(value, **kwargs):
"""Specify a value for use in an encoding"""
return dict(value=value, **kwargs)
def selection(name=None, type=Undefined, **kwds):
"""Create a named selection.
Parameters
----------
name : string (optional)
The name of the selection. If not specified, a unique name will be
created.
type : string
The type of the selection: one of ["interval", "single", or "multi"]
**kwds :
additional keywords will be used to construct a SelectionDef instance
that controls the selection.
Returns
-------
selection: Selection
The selection object that can be used in chart creation.
"""
return Selection(name, core.SelectionDef(type=type, **kwds))
@utils.use_signature(core.IntervalSelection)
def selection_interval(**kwargs):
"""Create a selection with type='interval'"""
return selection(type="interval", **kwargs)
@utils.use_signature(core.MultiSelection)
def selection_multi(**kwargs):
"""Create a selection with type='multi'"""
return selection(type="multi", **kwargs)
@utils.use_signature(core.SingleSelection)
def selection_single(**kwargs):
"""Create a selection with type='single'"""
return selection(type="single", **kwargs)
@utils.use_signature(core.Binding)
def binding(input, **kwargs):
"""A generic binding"""
return core.Binding(input=input, **kwargs)
@utils.use_signature(core.BindCheckbox)
def binding_checkbox(**kwargs):
"""A checkbox binding"""
return core.BindCheckbox(input="checkbox", **kwargs)
@utils.use_signature(core.BindRadioSelect)
def binding_radio(**kwargs):
"""A radio button binding"""
return core.BindRadioSelect(input="radio", **kwargs)
@utils.use_signature(core.BindRadioSelect)
def binding_select(**kwargs):
"""A select binding"""
return core.BindRadioSelect(input="select", **kwargs)
@utils.use_signature(core.BindRange)
def binding_range(**kwargs):
"""A range binding"""
return core.BindRange(input="range", **kwargs)
def condition(predicate, if_true, if_false, **kwargs):
"""A conditional attribute or encoding
Parameters
----------
predicate: Selection, PredicateComposition, expr.Expression, dict, or string
the selection predicate or test predicate for the condition.
if a string is passed, it will be treated as a test operand.
if_true:
the spec or object to use if the selection predicate is true
if_false:
the spec or object to use if the selection predicate is false
**kwargs:
additional keyword args are added to the resulting dict
Returns
-------
spec: dict or VegaLiteSchema
the spec that describes the condition
"""
test_predicates = (str, expr.Expression, core.PredicateComposition)
if isinstance(predicate, Selection):
condition = {"selection": predicate.name}
elif isinstance(predicate, core.SelectionComposition):
condition = {"selection": predicate}
elif isinstance(predicate, test_predicates):
condition = {"test": predicate}
elif isinstance(predicate, dict):
condition = predicate
else:
raise NotImplementedError(
"condition predicate of type {}" "".format(type(predicate))
)
if isinstance(if_true, core.SchemaBase):
# convert to dict for now; the from_dict call below will wrap this
# dict in the appropriate schema
if_true = if_true.to_dict()
elif isinstance(if_true, str):
if_true = {"shorthand": if_true}
if_true.update(kwargs)
condition.update(if_true)
if isinstance(if_false, core.SchemaBase):
# For the selection, the channel definitions all allow selections
# already. So use this SchemaBase wrapper if possible.
selection = if_false.copy()
selection.condition = condition
elif isinstance(if_false, str):
selection = {"condition": condition, "shorthand": if_false}
selection.update(kwargs)
else:
selection = dict(condition=condition, **if_false)
return selection
# --------------------------------------------------------------------
# Top-level objects
class TopLevelMixin(mixins.ConfigMethodMixin):
"""Mixin for top-level chart objects such as Chart, LayeredChart, etc."""
_class_is_valid_at_instantiation = False
def to_dict(self, *args, **kwargs):
"""Convert the chart to a dictionary suitable for JSON export"""
# We make use of three context markers:
# - 'data' points to the data that should be referenced for column type
# inference.
# - 'top_level' is a boolean flag that is assumed to be true; if it's
# true then a "$schema" arg is added to the dict.
# - 'datasets' is a dict of named datasets that should be inserted
# in the top-level object
# note: not a deep copy because we want datasets and data arguments to
# be passed by reference
context = kwargs.get("context", {}).copy()
context.setdefault("datasets", {})
is_top_level = context.get("top_level", True)
copy = self.copy(deep=False)
original_data = getattr(copy, "data", Undefined)
copy.data = _prepare_data(original_data, context)
if original_data is not Undefined:
context["data"] = original_data
# remaining to_dict calls are not at top level
context["top_level"] = False
kwargs["context"] = context
try:
dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
except jsonschema.ValidationError:
dct = None
# If we hit an error, then re-convert with validate='deep' to get
# a more useful traceback. We don't do this by default because it's
# much slower in the case that there are no errors.
if dct is None:
kwargs["validate"] = "deep"
dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
# TODO: following entries are added after validation. Should they be validated?
if is_top_level:
# since this is top-level we add $schema if it's missing
if "$schema" not in dct:
dct["$schema"] = SCHEMA_URL
# apply theme from theme registry
the_theme = themes.get()
dct = utils.update_nested(the_theme(), dct, copy=True)
# update datasets
if context["datasets"]:
dct.setdefault("datasets", {}).update(context["datasets"])
return dct
def to_html(
self,
base_url="https://cdn.jsdelivr.net/npm/",
output_div="vis",
embed_options=None,
json_kwds=None,
fullhtml=True,
requirejs=False,
):
return utils.spec_to_html(
self.to_dict(),
mode="vega-lite",
vegalite_version=VEGALITE_VERSION,
vegaembed_version=VEGAEMBED_VERSION,
vega_version=VEGA_VERSION,
base_url=base_url,
output_div=output_div,
embed_options=embed_options,
json_kwds=json_kwds,
fullhtml=fullhtml,
requirejs=requirejs,
)
def save(
self,
fp,
format=None,
override_data_transformer=True,
scale_factor=1.0,
vegalite_version=VEGALITE_VERSION,
vega_version=VEGA_VERSION,
vegaembed_version=VEGAEMBED_VERSION,
**kwargs,
):
"""Save a chart to file in a variety of formats
Supported formats are json, html, png, svg, pdf; the last three require
the altair_saver package to be installed.
Parameters
----------
fp : string filename or file-like object
file in which to write the chart.
format : string (optional)
the format to write: one of ['json', 'html', 'png', 'svg', 'pdf'].
If not specified, the format will be determined from the filename.
override_data_transformer : boolean (optional)
If True (default), then the save action will be done with
the MaxRowsError disabled. If False, then do not change the data
transformer.
scale_factor : float
For svg or png formats, scale the image by this factor when saving.
This can be used to control the size or resolution of the output.
Default is 1.0
**kwargs :
Additional keyword arguments are passed to the output method
associated with the specified format.
"""
from ...utils.save import save
kwds = dict(
chart=self,
fp=fp,
format=format,
scale_factor=scale_factor,
vegalite_version=vegalite_version,
vega_version=vega_version,
vegaembed_version=vegaembed_version,
**kwargs,
)
# By default we override the data transformer. This makes it so
# that save() will succeed even for large datasets that would
# normally trigger a MaxRowsError
if override_data_transformer:
with data_transformers.disable_max_rows():
result = save(**kwds)
else:
result = save(**kwds)
return result
# Fallback for when rendering fails; the full repr is too long to be
# useful in nearly all cases.
def __repr__(self):
return "alt.{}(...)".format(self.__class__.__name__)
# Layering and stacking
def __add__(self, other):
if not isinstance(other, TopLevelMixin):
raise ValueError("Only Chart objects can be layered.")
return layer(self, other)
def __and__(self, other):
if not isinstance(other, TopLevelMixin):
raise ValueError("Only Chart objects can be concatenated.")
return vconcat(self, other)
def __or__(self, other):
if not isinstance(other, TopLevelMixin):
raise ValueError("Only Chart objects can be concatenated.")
return hconcat(self, other)
def repeat(
self,
repeat=Undefined,
row=Undefined,
column=Undefined,
columns=Undefined,
**kwargs,
):
"""Return a RepeatChart built from the chart
Fields within the chart can be set to correspond to the row or
column using `alt.repeat('row')` and `alt.repeat('column')`.
Parameters
----------
repeat : list
a list of data column names to be repeated. This cannot be
used along with the ``row`` or ``column`` argument.
row : list
a list of data column names to be mapped to the row facet
column : list
a list of data column names to be mapped to the column facet
columns : int
the maximum number of columns before wrapping. Only referenced
if ``repeat`` is specified.
**kwargs :
additional keywords passed to RepeatChart.
Returns
-------
chart : RepeatChart
a repeated chart.
"""
repeat_specified = repeat is not Undefined
rowcol_specified = row is not Undefined or column is not Undefined
if repeat_specified and rowcol_specified:
raise ValueError(
"repeat argument cannot be combined with row/column argument."
)
if repeat_specified:
repeat = repeat
else:
repeat = core.RepeatMapping(row=row, column=column)
return RepeatChart(spec=self, repeat=repeat, columns=columns, **kwargs)
def properties(self, **kwargs):
"""Set top-level properties of the Chart.
Argument names and types are the same as class initialization.
"""
copy = self.copy(deep=False)
for key, val in kwargs.items():
if key == "selection" and isinstance(val, Selection):
# For backward compatibility with old selection interface.
setattr(copy, key, {val.name: val.selection})
else:
# Don't validate data, because it hasn't been processed.
if key != "data":
self.validate_property(key, val)
setattr(copy, key, val)
return copy
def project(
self,
type="mercator",
center=Undefined,
clipAngle=Undefined,
clipExtent=Undefined,
coefficient=Undefined,
distance=Undefined,
fraction=Undefined,
lobes=Undefined,
parallel=Undefined,
precision=Undefined,
radius=Undefined,
ratio=Undefined,
reflectX=Undefined,
reflectY=Undefined,
rotate=Undefined,
scale=Undefined,
spacing=Undefined,
tilt=Undefined,
translate=Undefined,
**kwds,
):
"""Add a geographic projection to the chart.
This is generally used either with ``mark_geoshape`` or with the
``latitude``/``longitude`` encodings.
Available projection types are
['albers', 'albersUsa', 'azimuthalEqualArea', 'azimuthalEquidistant',
'conicConformal', 'conicEqualArea', 'conicEquidistant', 'equalEarth', 'equirectangular',
'gnomonic', 'identity', 'mercator', 'orthographic', 'stereographic', 'transverseMercator']
Attributes
----------
type : ProjectionType
The cartographic projection to use. This value is case-insensitive, for example
`"albers"` and `"Albers"` indicate the same projection type. You can find all valid
projection types [in the
documentation](https://vega.github.io/vega-lite/docs/projection.html#projection-types).
**Default value:** `mercator`
center : List(float)
Sets the projection’s center to the specified center, a two-element array of
longitude and latitude in degrees.
**Default value:** `[0, 0]`
clipAngle : float
Sets the projection’s clipping circle radius to the specified angle in degrees. If
`null`, switches to [antimeridian](http://bl.ocks.org/mbostock/3788999) cutting
rather than small-circle clipping.
clipExtent : List(List(float))
Sets the projection’s viewport clip extent to the specified bounds in pixels. The
extent bounds are specified as an array `[[x0, y0], [x1, y1]]`, where `x0` is the
left-side of the viewport, `y0` is the top, `x1` is the right and `y1` is the
bottom. If `null`, no viewport clipping is performed.
coefficient : float
distance : float
fraction : float
lobes : float
parallel : float
precision : Mapping(required=[length])
Sets the threshold for the projection’s [adaptive
resampling](http://bl.ocks.org/mbostock/3795544) to the specified value in pixels.
This value corresponds to the [Douglas–Peucker
distance](http://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm).
If precision is not specified, returns the projection’s current resampling
precision which defaults to `√0.5 ≅ 0.70710…`.
radius : float
ratio : float
reflectX : boolean
reflectY : boolean
rotate : List(float)
Sets the projection’s three-axis rotation to the specified angles, which must be a
two- or three-element array of numbers [`lambda`, `phi`, `gamma`] specifying the
rotation angles in degrees about each spherical axis. (These correspond to yaw,
pitch and roll.)
**Default value:** `[0, 0, 0]`
scale : float
Sets the projection's scale (zoom) value, overriding automatic fitting.
spacing : float
tilt : float
translate : List(float)
Sets the projection's translation (pan) value, overriding automatic fitting.
"""
projection = core.Projection(
center=center,
clipAngle=clipAngle,
clipExtent=clipExtent,
coefficient=coefficient,
distance=distance,
fraction=fraction,
lobes=lobes,
parallel=parallel,
precision=precision,
radius=radius,
ratio=ratio,
reflectX=reflectX,
reflectY=reflectY,
rotate=rotate,
scale=scale,
spacing=spacing,
tilt=tilt,
translate=translate,
type=type,
**kwds,
)
return self.properties(projection=projection)
def _add_transform(self, *transforms):
"""Copy the chart and add specified transforms to chart.transform"""
copy = self.copy(deep=["transform"])
if copy.transform is Undefined:
copy.transform = []
copy.transform.extend(transforms)
return copy
def transform_aggregate(self, aggregate=Undefined, groupby=Undefined, **kwds):
"""
Add an AggregateTransform to the schema.
Parameters
----------
aggregate : List(:class:`AggregatedFieldDef`)
Array of objects that define fields to aggregate.
groupby : List(string)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
**kwds :
additional keywords are converted to aggregates using standard
shorthand parsing.
Returns
-------
self : Chart object
returns chart to allow for chaining
Examples
--------
The aggregate transform allows you to specify transforms directly using
the same shorthand syntax as used in encodings:
>>> import altair as alt
>>> chart1 = alt.Chart().transform_aggregate(
... mean_acc='mean(Acceleration)',
... groupby=['Origin']
... )
>>> print(chart1.transform[0].to_json()) # doctest: +NORMALIZE_WHITESPACE
{
"aggregate": [
{
"as": "mean_acc",
"field": "Acceleration",
"op": "mean"
}
],
"groupby": [
"Origin"
]
}
It also supports including AggregatedFieldDef instances or dicts directly,
so you can create the above transform like this:
>>> chart2 = alt.Chart().transform_aggregate(
... [alt.AggregatedFieldDef(field='Acceleration', op='mean',
... **{'as': 'mean_acc'})],
... groupby=['Origin']
... )
>>> chart2.transform == chart1.transform
True
See Also
--------
alt.AggregateTransform : underlying transform object
"""
if aggregate is Undefined:
aggregate = []
for key, val in kwds.items():
parsed = utils.parse_shorthand(val)
dct = {
"as": key,
"field": parsed.get("field", Undefined),
"op": parsed.get("aggregate", Undefined),
}
aggregate.append(core.AggregatedFieldDef(**dct))
return self._add_transform(
core.AggregateTransform(aggregate=aggregate, groupby=groupby)
)
def transform_bin(self, as_=Undefined, field=Undefined, bin=True, **kwargs):
"""
Add a BinTransform to the schema.
Parameters
----------
as_ : anyOf(string, List(string))
The output fields at which to write the start and end bin values.
bin : anyOf(boolean, :class:`BinParams`)
An object indicating bin properties, or simply ``true`` for using default bin
parameters.
field : string
The data field to bin.
Returns
-------
self : Chart object
returns chart to allow for chaining
Examples
--------
>>> import altair as alt
>>> chart = alt.Chart().transform_bin("x_binned", "x")
>>> chart.transform[0]
BinTransform({
as: 'x_binned',
bin: True,
field: 'x'
})
>>> chart = alt.Chart().transform_bin("x_binned", "x",
... bin=alt.Bin(maxbins=10))
>>> chart.transform[0]
BinTransform({
as: 'x_binned',
bin: BinParams({
maxbins: 10
}),
field: 'x'
})
See Also
--------
alt.BinTransform : underlying transform object
"""
if as_ is not Undefined:
if "as" in kwargs:
raise ValueError(
"transform_bin: both 'as_' and 'as' passed as arguments."
)
kwargs["as"] = as_
kwargs["bin"] = bin
kwargs["field"] = field
return self._add_transform(core.BinTransform(**kwargs))
def transform_calculate(self, as_=Undefined, calculate=Undefined, **kwargs):
"""
Add a CalculateTransform to the schema.
Parameters
----------
as_ : string
The field for storing the computed formula value.
calculate : string or alt.expr expression
A `expression <https://vega.github.io/vega-lite/docs/types.html#expression>`__
string. Use the variable ``datum`` to refer to the current data object.
**kwargs
transforms can also be passed by keyword argument; see Examples
Returns
-------
self : Chart object
returns chart to allow for chaining
Examples
--------
>>> import altair as alt
>>> from altair import datum, expr
>>> chart = alt.Chart().transform_calculate(y = 2 * expr.sin(datum.x))
>>> chart.transform[0]
CalculateTransform({
as: 'y',
calculate: (2 * sin(datum.x))
})
It's also possible to pass the ``CalculateTransform`` arguments directly:
>>> kwds = {'as': 'y', 'calculate': '2 * sin(datum.x)'}
>>> chart = alt.Chart().transform_calculate(**kwds)
>>> chart.transform[0]
CalculateTransform({
as: 'y',
calculate: '2 * sin(datum.x)'
})
As the first form is easier to write and understand, that is the
recommended method.
See Also
--------
alt.CalculateTransform : underlying transform object
"""
if as_ is Undefined:
as_ = kwargs.pop("as", Undefined)
elif "as" in kwargs:
raise ValueError(
"transform_calculate: both 'as_' and 'as' passed as arguments."
)
if as_ is not Undefined or calculate is not Undefined:
dct = {"as": as_, "calculate": calculate}
self = self._add_transform(core.CalculateTransform(**dct))
for as_, calculate in kwargs.items():
dct = {"as": as_, "calculate": calculate}
self = self._add_transform(core.CalculateTransform(**dct))
return self
def transform_density(
self,
density,
as_=Undefined,
bandwidth=Undefined,
counts=Undefined,
cumulative=Undefined,
extent=Undefined,
groupby=Undefined,
maxsteps=Undefined,
minsteps=Undefined,
steps=Undefined,
):
"""Add a DensityTransform to the spec.
Attributes
----------
density : str
The data field for which to perform density estimation.
as_ : [str, str]
The output fields for the sample value and corresponding density estimate.
**Default value:** ``["value", "density"]``
bandwidth : float
The bandwidth (standard deviation) of the Gaussian kernel. If unspecified or set to
zero, the bandwidth value is automatically estimated from the input data using
Scott’s rule.
counts : boolean
A boolean flag indicating if the output values should be probability estimates
(false) or smoothed counts (true).
**Default value:** ``false``
cumulative : boolean
A boolean flag indicating whether to produce density estimates (false) or cumulative
density estimates (true).
**Default value:** ``false``
extent : List([float, float])
A [min, max] domain from which to sample the distribution. If unspecified, the
extent will be determined by the observed minimum and maximum values of the density
value field.
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
maxsteps : float
The maximum number of samples to take along the extent domain for plotting the
density. **Default value:** ``200``
minsteps : float
The minimum number of samples to take along the extent domain for plotting the
density. **Default value:** ``25``
steps : float
The exact number of samples to take along the extent domain for plotting the
density. If specified, overrides both minsteps and maxsteps to set an exact number
of uniform samples. Potentially useful in conjunction with a fixed extent to ensure
consistent sample points for stacked densities.
"""
return self._add_transform(
core.DensityTransform(
density=density,
bandwidth=bandwidth,
counts=counts,
cumulative=cumulative,
extent=extent,
groupby=groupby,
maxsteps=maxsteps,
minsteps=minsteps,
steps=steps,
**{"as": as_},
)
)
def transform_impute(
self,
impute,
key,
frame=Undefined,
groupby=Undefined,
keyvals=Undefined,
method=Undefined,
value=Undefined,
):
"""
Add an ImputeTransform to the schema.
Parameters
----------
impute : string
The data field for which the missing values should be imputed.
key : string
A key field that uniquely identifies data objects within a group.
Missing key values (those occurring in the data but not in the current group) will
be imputed.
frame : List(anyOf(None, float))
A frame specification as a two-element array used to control the window over which
the specified method is applied. The array entries should either be a number
indicating the offset from the current data object, or null to indicate unbounded
rows preceding or following the current data object. For example, the value ``[-5,
5]`` indicates that the window should include five objects preceding and five
objects following the current object.
**Default value:** : ``[null, null]`` indicating that the window includes all
objects.
groupby : List(string)
An optional array of fields by which to group the values.
Imputation will then be performed on a per-group basis.
keyvals : anyOf(List(Mapping(required=[])), :class:`ImputeSequence`)
Defines the key values that should be considered for imputation.
An array of key values or an object defining a `number sequence
<https://vega.github.io/vega-lite/docs/impute.html#sequence-def>`__.
If provided, this will be used in addition to the key values observed within the
input data. If not provided, the values will be derived from all unique values of
the ``key`` field. For ``impute`` in ``encoding``, the key field is the x-field if
the y-field is imputed, or vice versa.
If there is no impute grouping, this property *must* be specified.
method : :class:`ImputeMethod`
The imputation method to use for the field value of imputed data objects.
One of ``value``, ``mean``, ``median``, ``max`` or ``min``.
**Default value:** ``"value"``
value : Mapping(required=[])
The field value to use when the imputation ``method`` is ``"value"``.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.ImputeTransform : underlying transform object
"""
return self._add_transform(
core.ImputeTransform(
impute=impute,
key=key,
frame=frame,
groupby=groupby,
keyvals=keyvals,
method=method,
value=value,
)
)
def transform_joinaggregate(
self, joinaggregate=Undefined, groupby=Undefined, **kwargs
):
"""
Add a JoinAggregateTransform to the schema.
Parameters
----------
joinaggregate : List(:class:`JoinAggregateFieldDef`)
The definition of the fields in the join aggregate, and what calculations to use.
groupby : List(string)
The data fields for partitioning the data objects into separate groups. If
unspecified, all data points will be in a single group.
**kwargs
joinaggregates can also be passed by keyword argument; see Examples.
Returns
-------
self : Chart object
returns chart to allow for chaining
Examples
--------
>>> import altair as alt
>>> chart = alt.Chart().transform_joinaggregate(x='sum(y)')
>>> chart.transform[0]
JoinAggregateTransform({
joinaggregate: [JoinAggregateFieldDef({
as: 'x',
field: 'y',
op: 'sum'
})]
})
See Also
--------
alt.JoinAggregateTransform : underlying transform object
"""
if joinaggregate is Undefined:
joinaggregate = []
for key, val in kwargs.items():
parsed = utils.parse_shorthand(val)
dct = {
"as": key,
"field": parsed.get("field", Undefined),
"op": parsed.get("aggregate", Undefined),
}
joinaggregate.append(core.JoinAggregateFieldDef(**dct))
return self._add_transform(
core.JoinAggregateTransform(joinaggregate=joinaggregate, groupby=groupby)
)
def transform_filter(self, filter, **kwargs):
"""
Add a FilterTransform to the schema.
Parameters
----------
filter : a filter expression or :class:`PredicateComposition`
The `filter` property must be one of the predicate definitions:
(1) a string or alt.expr expression
(2) a range predicate
(3) a selection predicate
(4) a logical operand combining (1)-(3)
(5) a Selection object
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.FilterTransform : underlying transform object
"""
if isinstance(filter, Selection):
filter = {"selection": filter.name}
elif isinstance(filter, core.SelectionComposition):
filter = {"selection": filter}
return self._add_transform(core.FilterTransform(filter=filter, **kwargs))
def transform_flatten(self, flatten, as_=Undefined):
"""Add a FlattenTransform to the schema.
Parameters
----------
flatten : List(string)
An array of one or more data fields containing arrays to flatten.
If multiple fields are specified, their array values should have a parallel
structure, ideally with the same length.
If the lengths of parallel arrays do not match,
the longest array will be used with ``null`` values added for missing entries.
as : List(string)
The output field names for extracted array values.
**Default value:** The field name of the corresponding array field
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.FlattenTransform : underlying transform object
"""
return self._add_transform(
core.FlattenTransform(flatten=flatten, **{"as": as_})
)
def transform_fold(self, fold, as_=Undefined):
"""Add a FoldTransform to the spec.
Parameters
----------
fold : List(string)
An array of data fields indicating the properties to fold.
as : [string, string]
The output field names for the key and value properties produced by the fold
transform. Default: ``["key", "value"]``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_pivot : pivot transform - opposite of fold.
alt.FoldTransform : underlying transform object
"""
return self._add_transform(core.FoldTransform(fold=fold, **{"as": as_}))
def transform_loess(
self, on, loess, as_=Undefined, bandwidth=Undefined, groupby=Undefined
):
"""Add a LoessTransform to the spec.
Parameters
----------
on : str
The data field of the independent variable to use a predictor.
loess : str
The data field of the dependent variable to smooth.
as_ : [str, str]
The output field names for the smoothed points generated by the loess transform.
**Default value:** The field names of the input x and y values.
bandwidth : float
A bandwidth parameter in the range ``[0, 1]`` that determines the amount of
smoothing. **Default value:** ``0.3``
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_regression: regression transform
alt.LoessTransform : underlying transform object
"""
return self._add_transform(
core.LoessTransform(
loess=loess, on=on, bandwidth=bandwidth, groupby=groupby, **{"as": as_}
)
)
def transform_lookup(
self,
lookup=Undefined,
from_=Undefined,
as_=Undefined,
default=Undefined,
**kwargs,
):
"""Add a DataLookupTransform or SelectionLookupTransform to the chart
Attributes
----------
lookup : string
Key in primary data source.
from_ : anyOf(:class:`LookupData`, :class:`LookupSelection`)
Secondary data reference.
as_ : anyOf(string, List(string))
The output fields on which to store the looked up data values.
For data lookups, this property may be left blank if ``from_.fields``
has been specified (those field names will be used); if ``from_.fields``
has not been specified, ``as_`` must be a string.
For selection lookups, this property is optional: if unspecified,
looked up values will be stored under a property named for the selection;
and if specified, it must correspond to ``from_.fields``.
default : string
The default value to use if lookup fails. **Default value:** ``null``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.DataLookupTransform : underlying transform object
alt.SelectionLookupTransform : underlying transform object
"""
if as_ is not Undefined:
if "as" in kwargs:
raise ValueError(
"transform_lookup: both 'as_' and 'as' passed as arguments."
)
kwargs["as"] = as_
if from_ is not Undefined:
if "from" in kwargs:
raise ValueError(
"transform_lookup: both 'from_' and 'from' passed as arguments."
)
kwargs["from"] = from_
kwargs["lookup"] = lookup
kwargs["default"] = default
return self._add_transform(core.LookupTransform(**kwargs))
def transform_pivot(
self, pivot, value, groupby=Undefined, limit=Undefined, op=Undefined
):
"""Add a pivot transform to the chart.
Parameters
----------
pivot : str
The data field to pivot on. The unique values of this field become new field names
in the output stream.
value : str
The data field to populate pivoted fields. The aggregate values of this field become
the values of the new pivoted fields.
groupby : List(str)
The optional data fields to group by. If not specified, a single group containing
all data objects will be used.
limit : float
An optional parameter indicating the maximum number of pivoted fields to generate.
The default ( ``0`` ) applies no limit. The pivoted ``pivot`` names are sorted in
ascending order prior to enforcing the limit.
**Default value:** ``0``
op : string
The aggregation operation to apply to grouped ``value`` field values.
**Default value:** ``sum``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_fold : fold transform - opposite of pivot.
alt.PivotTransform : underlying transform object
"""
return self._add_transform(
core.PivotTransform(
pivot=pivot, value=value, groupby=groupby, limit=limit, op=op
)
)
def transform_quantile(
self,
quantile,
as_=Undefined,
groupby=Undefined,
probs=Undefined,
step=Undefined,
):
"""Add a quantile transform to the chart
Parameters
----------
quantile : str
The data field for which to perform quantile estimation.
as : [str, str]
The output field names for the probability and quantile values.
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
probs : List(float)
An array of probabilities in the range (0, 1) for which to compute quantile values.
If not specified, the *step* parameter will be used.
step : float
A probability step size (default 0.01) for sampling quantile values. All values from
one-half the step size up to 1 (exclusive) will be sampled. This parameter is only
used if the *probs* parameter is not provided. **Default value:** ``["prob", "value"]``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.QuantileTransform : underlying transform object
"""
return self._add_transform(
core.QuantileTransform(
quantile=quantile,
groupby=groupby,
probs=probs,
step=step,
**{"as": as_},
)
)
def transform_regression(
self,
on,
regression,
as_=Undefined,
extent=Undefined,
groupby=Undefined,
method=Undefined,
order=Undefined,
params=Undefined,
):
"""Add a RegressionTransform to the chart.
Parameters
----------
on : str
The data field of the independent variable to use a predictor.
regression : str
The data field of the dependent variable to predict.
as_ : [str, str]
The output field names for the smoothed points generated by the regression
transform. **Default value:** The field names of the input x and y values.
extent : [float, float]
A [min, max] domain over the independent (x) field for the starting and ending
points of the generated trend line.
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
method : enum('linear', 'log', 'exp', 'pow', 'quad', 'poly')
The functional form of the regression model. One of ``"linear"``, ``"log"``,
``"exp"``, ``"pow"``, ``"quad"``, or ``"poly"``. **Default value:** ``"linear"``
order : float
The polynomial order (number of coefficients) for the 'poly' method.
**Default value:** ``3``
params : boolean
A boolean flag indicating if the transform should return the regression model
parameters (one object per group), rather than trend line points.
The resulting objects include a ``coef`` array of fitted coefficient values
(starting with the intercept term and then including terms of increasing order)
and an ``rSquared`` value (indicating the total variance explained by the model).
**Default value:** ``false``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_loess : LOESS transform
alt.RegressionTransform : underlying transform object
"""
return self._add_transform(
core.RegressionTransform(
regression=regression,
on=on,
extent=extent,
groupby=groupby,
method=method,
order=order,
params=params,
**{"as": as_},
)
)
def transform_sample(self, sample=1000):
"""
Add a SampleTransform to the schema.
Parameters
----------
sample : float
The maximum number of data objects to include in the sample. Default: 1000.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.SampleTransform : underlying transform object
"""
return self._add_transform(core.SampleTransform(sample))
def transform_stack(self, as_, stack, groupby, offset=Undefined, sort=Undefined):
"""
Add a StackTransform to the schema.
Parameters
----------
as_ : anyOf(string, List(string))
Output field names. This can be either a string or an array of strings with
two elements denoting the name for the fields for stack start and stack end
respectively.
If a single string(eg."val") is provided, the end field will be "val_end".
stack : string
The field which is stacked.
groupby : List(string)
The data fields to group by.
offset : enum('zero', 'center', 'normalize')
Mode for stacking marks. Default: 'zero'.
sort : List(:class:`SortField`)
Field that determines the order of leaves in the stacked charts.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.StackTransform : underlying transform object
"""
return self._add_transform(
core.StackTransform(
stack=stack, groupby=groupby, offset=offset, sort=sort, **{"as": as_}
)
)
def transform_timeunit(
self, as_=Undefined, field=Undefined, timeUnit=Undefined, **kwargs
):
"""
Add a TimeUnitTransform to the schema.
Parameters
----------
as_ : string
The output field to write the timeUnit value.
field : string
The data field to apply time unit.
timeUnit : :class:`TimeUnit`
The timeUnit.
**kwargs
transforms can also be passed by keyword argument; see Examples
Returns
-------
self : Chart object
returns chart to allow for chaining
Examples
--------
>>> import altair as alt
>>> from altair import datum, expr
>>> chart = alt.Chart().transform_timeunit(month='month(date)')
>>> chart.transform[0]
TimeUnitTransform({
as: 'month',
field: 'date',
timeUnit: 'month'
})
It's also possible to pass the ``TimeUnitTransform`` arguments directly;
this is most useful in cases where the desired field name is not a
valid python identifier:
>>> kwds = {'as': 'month', 'timeUnit': 'month', 'field': 'The Month'}
>>> chart = alt.Chart().transform_timeunit(**kwds)
>>> chart.transform[0]
TimeUnitTransform({
as: 'month',
field: 'The Month',
timeUnit: 'month'
})
As the first form is easier to write and understand, that is the
recommended method.
See Also
--------
alt.TimeUnitTransform : underlying transform object
"""
if as_ is Undefined:
as_ = kwargs.pop("as", Undefined)
else:
if "as" in kwargs:
raise ValueError(
"transform_timeunit: both 'as_' and 'as' passed as arguments."
)
if as_ is not Undefined:
dct = {"as": as_, "timeUnit": timeUnit, "field": field}
self = self._add_transform(core.TimeUnitTransform(**dct))
for as_, shorthand in kwargs.items():
dct = utils.parse_shorthand(
shorthand,
parse_timeunits=True,
parse_aggregates=False,
parse_types=False,
)
dct.pop("type", None)
dct["as"] = as_
if "timeUnit" not in dct:
raise ValueError("'{}' must include a valid timeUnit".format(shorthand))
self = self._add_transform(core.TimeUnitTransform(**dct))
return self
def transform_window(
self,
window=Undefined,
frame=Undefined,
groupby=Undefined,
ignorePeers=Undefined,
sort=Undefined,
**kwargs,
):
"""Add a WindowTransform to the schema
Parameters
----------
window : List(:class:`WindowFieldDef`)
The definition of the fields in the window, and what calculations to use.
frame : List(anyOf(None, float))
A frame specification as a two-element array indicating how the sliding window
should proceed. The array entries should either be a number indicating the offset
from the current data object, or null to indicate unbounded rows preceding or
following the current data object. The default value is ``[null, 0]``, indicating
that the sliding window includes the current object and all preceding objects. The
value ``[-5, 5]`` indicates that the window should include five objects preceding
and five objects following the current object. Finally, ``[null, null]`` indicates
that the window frame should always include all data objects. The only operators
affected are the aggregation operations and the ``first_value``, ``last_value``, and
``nth_value`` window operations. The other window operations are not affected by
this.
**Default value:** : ``[null, 0]`` (includes the current object and all preceding
objects)
groupby : List(string)
The data fields for partitioning the data objects into separate windows. If
unspecified, all data points will be in a single group.
ignorePeers : boolean
Indicates if the sliding window frame should ignore peer values. (Peer values are
those considered identical by the sort criteria). The default is false, causing the
window frame to expand to include all peer values. If set to true, the window frame
will be defined by offset values only. This setting only affects those operations
that depend on the window frame, namely aggregation operations and the first_value,
last_value, and nth_value window operations.
**Default value:** ``false``
sort : List(:class:`SortField`)
A sort field definition for sorting data objects within a window. If two data
objects are considered equal by the comparator, they are considered “peer” values of
equal rank. If sort is not specified, the order is undefined: data objects are
processed in the order they are observed and none are considered peers (the
ignorePeers parameter is ignored and treated as if set to ``true`` ).
**kwargs
transforms can also be passed by keyword argument; see Examples
Examples
--------
A cumulative line chart
>>> import altair as alt
>>> import numpy as np
>>> import pandas as pd
>>> data = pd.DataFrame({'x': np.arange(100),
... 'y': np.random.randn(100)})
>>> chart = alt.Chart(data).mark_line().encode(
... x='x:Q',
... y='ycuml:Q'
... ).transform_window(
... ycuml='sum(y)'
... )
>>> chart.transform[0]
WindowTransform({
window: [WindowFieldDef({
as: 'ycuml',
field: 'y',
op: 'sum'
})]
})
"""
if kwargs:
if window is Undefined:
window = []
for as_, shorthand in kwargs.items():
kwds = {"as": as_}
kwds.update(
utils.parse_shorthand(
shorthand,
parse_aggregates=False,
parse_window_ops=True,
parse_timeunits=False,
parse_types=False,
)
)
window.append(core.WindowFieldDef(**kwds))
return self._add_transform(
core.WindowTransform(
window=window,
frame=frame,
groupby=groupby,
ignorePeers=ignorePeers,
sort=sort,
)
)
# Display-related methods
def _repr_mimebundle_(self, include=None, exclude=None):
"""Return a MIME bundle for display in Jupyter frontends."""
# Catch errors explicitly to get around issues in Jupyter frontend
# see https://github.com/ipython/ipython/issues/11038
try:
dct = self.to_dict()
except Exception:
utils.display_traceback(in_ipython=True)
return {}
else:
return renderers.get()(dct)
def display(self, renderer=Undefined, theme=Undefined, actions=Undefined, **kwargs):
"""Display chart in Jupyter notebook or JupyterLab
Parameters are passed as options to vega-embed within supported frontends.
See https://github.com/vega/vega-embed#options for details.
Parameters
----------
renderer : string ('canvas' or 'svg')
The renderer to use
theme : string
The Vega theme name to use; see https://github.com/vega/vega-themes
actions : bool or dict
Specify whether action links ("Open In Vega Editor", etc.) are
included in the view.
**kwargs :
Additional parameters are also passed to vega-embed as options.
"""
from IPython.display import display
if renderer is not Undefined:
kwargs["renderer"] = renderer
if theme is not Undefined:
kwargs["theme"] = theme
if actions is not Undefined:
kwargs["actions"] = actions
if kwargs:
options = renderers.options.copy()
options["embed_options"] = options.get("embed_options", {}).copy()
options["embed_options"].update(kwargs)
with renderers.enable(**options):
display(self)
else:
display(self)
@utils.deprecation.deprecated(message="serve() is deprecated. Use show() instead.")
def serve(
self,
ip="127.0.0.1",
port=8888,
n_retries=50,
files=None,
jupyter_warning=True,
open_browser=True,
http_server=None,
**kwargs,
):
"""Open a browser window and display a rendering of the chart
Parameters
----------
html : string
HTML to serve
ip : string (default = '127.0.0.1')
ip address at which the HTML will be served.
port : int (default = 8888)
the port at which to serve the HTML
n_retries : int (default = 50)
the number of nearby ports to search if the specified port
is already in use.
files : dictionary (optional)
dictionary of extra content to serve
jupyter_warning : bool (optional)
if True (default), then print a warning if this is used
within the Jupyter notebook
open_browser : bool (optional)
if True (default), then open a web browser to the given HTML
http_server : class (optional)
optionally specify an HTTPServer class to use for showing the
figure. The default is Python's basic HTTPServer.
**kwargs :
additional keyword arguments passed to the save() method
"""
from ...utils.server import serve
html = io.StringIO()
self.save(html, format="html", **kwargs)
html.seek(0)
serve(
html.read(),
ip=ip,
port=port,
n_retries=n_retries,
files=files,
jupyter_warning=jupyter_warning,
open_browser=open_browser,
http_server=http_server,
)
def show(self, embed_opt=None, open_browser=None):
"""Show the chart in an external browser window.
This requires a recent version of the altair_viewer package.
Parameters
----------
embed_opt : dict (optional)
The Vega embed options that control the dispay of the chart.
open_browser : bool (optional)
Specify whether a browser window should be opened. If not specified,
a browser window will be opened only if the server is not already
connected to a browser.
"""
try:
import altair_viewer
except ImportError:
raise ValueError(
"show() method requires the altair_viewer package. "
"See http://github.com/altair-viz/altair_viewer"
)
altair_viewer.show(self, embed_opt=embed_opt, open_browser=open_browser)
@utils.use_signature(core.Resolve)
def _set_resolve(self, **kwargs):
"""Copy the chart and update the resolve property with kwargs"""
if not hasattr(self, "resolve"):
raise ValueError(
"{} object has no attribute " "'resolve'".format(self.__class__)
)
copy = self.copy(deep=["resolve"])
if copy.resolve is Undefined:
copy.resolve = core.Resolve()
for key, val in kwargs.items():
copy.resolve[key] = val
return copy
@utils.use_signature(core.AxisResolveMap)
def resolve_axis(self, *args, **kwargs):
return self._set_resolve(axis=core.AxisResolveMap(*args, **kwargs))
@utils.use_signature(core.LegendResolveMap)
def resolve_legend(self, *args, **kwargs):
return self._set_resolve(legend=core.LegendResolveMap(*args, **kwargs))
@utils.use_signature(core.ScaleResolveMap)
def resolve_scale(self, *args, **kwargs):
return self._set_resolve(scale=core.ScaleResolveMap(*args, **kwargs))
class _EncodingMixin(object):
@utils.use_signature(core.FacetedEncoding)
def encode(self, *args, **kwargs):
# Convert args to kwargs based on their types.
kwargs = utils.infer_encoding_types(args, kwargs, channels)
# get a copy of the dict representation of the previous encoding
copy = self.copy(deep=["encoding"])
encoding = copy._get("encoding", {})
if isinstance(encoding, core.VegaLiteSchema):
encoding = {k: v for k, v in encoding._kwds.items() if v is not Undefined}
# update with the new encodings, and apply them to the copy
encoding.update(kwargs)
copy.encoding = core.FacetedEncoding(**encoding)
return copy
def facet(
self,
facet=Undefined,
row=Undefined,
column=Undefined,
data=Undefined,
columns=Undefined,
**kwargs,
):
"""Create a facet chart from the current chart.
Faceted charts require data to be specified at the top level; if data
is not specified, the data from the current chart will be used at the
top level.
Parameters
----------
facet : string or alt.Facet (optional)
The data column to use as an encoding for a wrapped facet.
If specified, then neither row nor column may be specified.
column : string or alt.Column (optional)
The data column to use as an encoding for a column facet.
May be combined with row argument, but not with facet argument.
row : string or alt.Column (optional)
The data column to use as an encoding for a row facet.
May be combined with column argument, but not with facet argument.
data : string or dataframe (optional)
The dataset to use for faceting. If not supplied, then data must
be specified in the top-level chart that calls this method.
columns : integer
the maximum number of columns for a wrapped facet.
Returns
-------
self :
for chaining
"""
facet_specified = facet is not Undefined
rowcol_specified = row is not Undefined or column is not Undefined
if facet_specified and rowcol_specified:
raise ValueError(
"facet argument cannot be combined with row/column argument."
)
if data is Undefined:
if self.data is Undefined:
raise ValueError(
"Facet charts require data to be specified at the top level."
)
self = self.copy(deep=False)
data, self.data = self.data, Undefined
if facet_specified:
if isinstance(facet, str):
facet = channels.Facet(facet)
else:
facet = FacetMapping(row=row, column=column)
return FacetChart(spec=self, facet=facet, data=data, columns=columns, **kwargs)
class Chart(
TopLevelMixin, _EncodingMixin, mixins.MarkMethodMixin, core.TopLevelUnitSpec
):
"""Create a basic Altair/Vega-Lite chart.
Although it is possible to set all Chart properties as constructor attributes,
it is more idiomatic to use methods such as ``mark_point()``, ``encode()``,
``transform_filter()``, ``properties()``, etc. See Altair's documentation
for details and examples: http://altair-viz.github.io/.
Attributes
----------
data : Data
An object describing the data source
mark : AnyMark
A string describing the mark type (one of `"bar"`, `"circle"`, `"square"`, `"tick"`,
`"line"`, * `"area"`, `"point"`, `"rule"`, `"geoshape"`, and `"text"`) or a
MarkDef object.
encoding : FacetedEncoding
A key-value mapping between encoding channels and definition of fields.
autosize : anyOf(AutosizeType, AutoSizeParams)
Sets how the visualization size should be determined. If a string, should be one of
`"pad"`, `"fit"` or `"none"`. Object values can additionally specify parameters for
content sizing and automatic resizing. `"fit"` is only supported for single and
layered views that don't use `rangeStep`. __Default value__: `pad`
background : string
CSS color property to use as the background of visualization.
**Default value:** none (transparent)
config : Config
Vega-Lite configuration object. This property can only be defined at the top-level
of a specification.
description : string
Description of this mark for commenting purpose.
height : float
The height of a visualization.
name : string
Name of the visualization for later reference.
padding : Padding
The default visualization padding, in pixels, from the edge of the visualization
canvas to the data rectangle. If a number, specifies padding for all sides. If an
object, the value should have the format `{"left": 5, "top": 5, "right": 5,
"bottom": 5}` to specify padding for each side of the visualization. __Default
value__: `5`
projection : Projection
An object defining properties of geographic projection. Works with `"geoshape"`
marks and `"point"` or `"line"` marks that have a channel (one or more of `"X"`,
`"X2"`, `"Y"`, `"Y2"`) with type `"latitude"`, or `"longitude"`.
selection : Mapping(required=[])
A key-value mapping between selection names and definitions.
title : anyOf(string, TitleParams)
Title for the plot.
transform : List(Transform)
An array of data transformations such as filter and new field calculation.
width : float
The width of a visualization.
"""
def __init__(
self,
data=Undefined,
encoding=Undefined,
mark=Undefined,
width=Undefined,
height=Undefined,
**kwargs,
):
super(Chart, self).__init__(
data=data,
encoding=encoding,
mark=mark,
width=width,
height=height,
**kwargs,
)
@classmethod
def from_dict(cls, dct, validate=True):
"""Construct class from a dictionary representation
Parameters
----------
dct : dictionary
The dict from which to construct the class
validate : boolean
If True (default), then validate the input against the schema.
Returns
-------
obj : Chart object
The wrapped schema
Raises
------
jsonschema.ValidationError :
if validate=True and dct does not conform to the schema
"""
for class_ in TopLevelMixin.__subclasses__():
if class_ is Chart:
class_ = super(Chart, cls)
try:
return class_.from_dict(dct, validate=validate)
except jsonschema.ValidationError:
pass
# As a last resort, try using the Root vegalite object
return core.Root.from_dict(dct, validate)
def add_selection(self, *selections):
"""Add one or more selections to the chart."""
if not selections:
return self
copy = self.copy(deep=["selection"])
if copy.selection is Undefined:
copy.selection = {}
for s in selections:
copy.selection[s.name] = s.selection
return copy
def interactive(self, name=None, bind_x=True, bind_y=True):
"""Make chart axes scales interactive
Parameters
----------
name : string
The selection name to use for the axes scales. This name should be
unique among all selections within the chart.
bind_x : boolean, default True
If true, then bind the interactive scales to the x-axis
bind_y : boolean, default True
If true, then bind the interactive scales to the y-axis
Returns
-------
chart :
copy of self, with interactive axes added
"""
encodings = []
if bind_x:
encodings.append("x")
if bind_y:
encodings.append("y")
return self.add_selection(
selection_interval(bind="scales", encodings=encodings)
)
def _check_if_valid_subspec(spec, classname):
"""Check if the spec is a valid sub-spec.
If it is not, then raise a ValueError
"""
err = (
'Objects with "{0}" attribute cannot be used within {1}. '
"Consider defining the {0} attribute in the {1} object instead."
)
if not isinstance(spec, (core.SchemaBase, dict)):
raise ValueError("Only chart objects can be used in {0}.".format(classname))
for attr in TOPLEVEL_ONLY_KEYS:
if isinstance(spec, core.SchemaBase):
val = getattr(spec, attr, Undefined)
else:
val = spec.get(attr, Undefined)
if val is not Undefined:
raise ValueError(err.format(attr, classname))
def _check_if_can_be_layered(spec):
"""Check if the spec can be layered."""
def _get(spec, attr):
if isinstance(spec, core.SchemaBase):
return spec._get(attr)
else:
return spec.get(attr, Undefined)
encoding = _get(spec, "encoding")
if encoding is not Undefined:
for channel in ["row", "column", "facet"]:
if _get(encoding, channel) is not Undefined:
raise ValueError("Faceted charts cannot be layered.")
if isinstance(spec, (Chart, LayerChart)):
return
if not isinstance(spec, (core.SchemaBase, dict)):
raise ValueError("Only chart objects can be layered.")
if _get(spec, "facet") is not Undefined:
raise ValueError("Faceted charts cannot be layered.")
if isinstance(spec, FacetChart) or _get(spec, "facet") is not Undefined:
raise ValueError("Faceted charts cannot be layered.")
if isinstance(spec, RepeatChart) or _get(spec, "repeat") is not Undefined:
raise ValueError("Repeat charts cannot be layered.")
if isinstance(spec, ConcatChart) or _get(spec, "concat") is not Undefined:
raise ValueError("Concatenated charts cannot be layered.")
if isinstance(spec, HConcatChart) or _get(spec, "hconcat") is not Undefined:
raise ValueError("Concatenated charts cannot be layered.")
if isinstance(spec, VConcatChart) or _get(spec, "vconcat") is not Undefined:
raise ValueError("Concatenated charts cannot be layered.")
@utils.use_signature(core.TopLevelRepeatSpec)
class RepeatChart(TopLevelMixin, core.TopLevelRepeatSpec):
"""A chart repeated across rows and columns with small changes"""
def __init__(self, data=Undefined, spec=Undefined, repeat=Undefined, **kwargs):
_check_if_valid_subspec(spec, "RepeatChart")
super(RepeatChart, self).__init__(data=data, spec=spec, repeat=repeat, **kwargs)
def interactive(self, name=None, bind_x=True, bind_y=True):
"""Make chart axes scales interactive
Parameters
----------
name : string
The selection name to use for the axes scales. This name should be
unique among all selections within the chart.
bind_x : boolean, default True
If true, then bind the interactive scales to the x-axis
bind_y : boolean, default True
If true, then bind the interactive scales to the y-axis
Returns
-------
chart :
copy of self, with interactive axes added
"""
copy = self.copy(deep=False)
copy.spec = copy.spec.interactive(name=name, bind_x=bind_x, bind_y=bind_y)
return copy
def add_selection(self, *selections):
"""Add one or more selections to the chart."""
if not selections or self.spec is Undefined:
return self
copy = self.copy()
copy.spec = copy.spec.add_selection(*selections)
return copy
def repeat(repeater="repeat"):
"""Tie a channel to the row or column within a repeated chart
The output of this should be passed to the ``field`` attribute of
a channel.
Parameters
----------
repeater : {'row'|'column'|'repeat'}
The repeater to tie the field to. Default is 'repeat'.
Returns
-------
repeat : RepeatRef object
"""
if repeater not in ["row", "column", "repeat"]:
raise ValueError("repeater must be one of ['row', 'column', 'repeat']")
return core.RepeatRef(repeat=repeater)
@utils.use_signature(core.TopLevelNormalizedConcatSpecGenericSpec)
class ConcatChart(TopLevelMixin, core.TopLevelNormalizedConcatSpecGenericSpec):
"""A chart with horizontally-concatenated facets"""
def __init__(self, data=Undefined, concat=(), columns=Undefined, **kwargs):
# TODO: move common data to top level?
for spec in concat:
_check_if_valid_subspec(spec, "ConcatChart")
super(ConcatChart, self).__init__(
data=data, concat=list(concat), columns=columns, **kwargs
)
self.data, self.concat = _combine_subchart_data(self.data, self.concat)
def __ior__(self, other):
_check_if_valid_subspec(other, "ConcatChart")
self.concat.append(other)
self.data, self.concat = _combine_subchart_data(self.data, self.concat)
return self
def __or__(self, other):
copy = self.copy(deep=["concat"])
copy |= other
return copy
def add_selection(self, *selections):
"""Add one or more selections to all subcharts."""
if not selections or not self.concat:
return self
copy = self.copy()
copy.concat = [chart.add_selection(*selections) for chart in copy.concat]
return copy
def concat(*charts, **kwargs):
"""Concatenate charts horizontally"""
return ConcatChart(concat=charts, **kwargs)
@utils.use_signature(core.TopLevelNormalizedHConcatSpecGenericSpec)
class HConcatChart(TopLevelMixin, core.TopLevelNormalizedHConcatSpecGenericSpec):
"""A chart with horizontally-concatenated facets"""
def __init__(self, data=Undefined, hconcat=(), **kwargs):
# TODO: move common data to top level?
for spec in hconcat:
_check_if_valid_subspec(spec, "HConcatChart")
super(HConcatChart, self).__init__(data=data, hconcat=list(hconcat), **kwargs)
self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
def __ior__(self, other):
_check_if_valid_subspec(other, "HConcatChart")
self.hconcat.append(other)
self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
return self
def __or__(self, other):
copy = self.copy(deep=["hconcat"])
copy |= other
return copy
def add_selection(self, *selections):
"""Add one or more selections to all subcharts."""
if not selections or not self.hconcat:
return self
copy = self.copy()
copy.hconcat = [chart.add_selection(*selections) for chart in copy.hconcat]
return copy
def hconcat(*charts, **kwargs):
"""Concatenate charts horizontally"""
return HConcatChart(hconcat=charts, **kwargs)
@utils.use_signature(core.TopLevelNormalizedVConcatSpecGenericSpec)
class VConcatChart(TopLevelMixin, core.TopLevelNormalizedVConcatSpecGenericSpec):
"""A chart with vertically-concatenated facets"""
def __init__(self, data=Undefined, vconcat=(), **kwargs):
# TODO: move common data to top level?
for spec in vconcat:
_check_if_valid_subspec(spec, "VConcatChart")
super(VConcatChart, self).__init__(data=data, vconcat=list(vconcat), **kwargs)
self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat)
def __iand__(self, other):
_check_if_valid_subspec(other, "VConcatChart")
self.vconcat.append(other)
self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat)
return self
def __and__(self, other):
copy = self.copy(deep=["vconcat"])
copy &= other
return copy
def add_selection(self, *selections):
"""Add one or more selections to all subcharts."""
if not selections or not self.vconcat:
return self
copy = self.copy()
copy.vconcat = [chart.add_selection(*selections) for chart in copy.vconcat]
return copy
def vconcat(*charts, **kwargs):
"""Concatenate charts vertically"""
return VConcatChart(vconcat=charts, **kwargs)
@utils.use_signature(core.TopLevelLayerSpec)
class LayerChart(TopLevelMixin, _EncodingMixin, core.TopLevelLayerSpec):
"""A Chart with layers within a single panel"""
def __init__(self, data=Undefined, layer=(), **kwargs):
# TODO: move common data to top level?
# TODO: check for conflicting interaction
for spec in layer:
_check_if_valid_subspec(spec, "LayerChart")
_check_if_can_be_layered(spec)
super(LayerChart, self).__init__(data=data, layer=list(layer), **kwargs)
self.data, self.layer = _combine_subchart_data(self.data, self.layer)
def __iadd__(self, other):
_check_if_valid_subspec(other, "LayerChart")
_check_if_can_be_layered(other)
self.layer.append(other)
self.data, self.layer = _combine_subchart_data(self.data, self.layer)
return self
def __add__(self, other):
copy = self.copy(deep=["layer"])
copy += other
return copy
def add_layers(self, *layers):
copy = self.copy(deep=["layer"])
for layer in layers:
copy += layer
return copy
def interactive(self, name=None, bind_x=True, bind_y=True):
"""Make chart axes scales interactive
Parameters
----------
name : string
The selection name to use for the axes scales. This name should be
unique among all selections within the chart.
bind_x : boolean, default True
If true, then bind the interactive scales to the x-axis
bind_y : boolean, default True
If true, then bind the interactive scales to the y-axis
Returns
-------
chart :
copy of self, with interactive axes added
"""
if not self.layer:
raise ValueError(
"LayerChart: cannot call interactive() until a " "layer is defined"
)
copy = self.copy(deep=["layer"])
copy.layer[0] = copy.layer[0].interactive(
name=name, bind_x=bind_x, bind_y=bind_y
)
return copy
def add_selection(self, *selections):
"""Add one or more selections to all subcharts."""
if not selections or not self.layer:
return self
copy = self.copy()
copy.layer[0] = copy.layer[0].add_selection(*selections)
return copy
def layer(*charts, **kwargs):
"""layer multiple charts"""
return LayerChart(layer=charts, **kwargs)
@utils.use_signature(core.TopLevelFacetSpec)
class FacetChart(TopLevelMixin, core.TopLevelFacetSpec):
"""A Chart with layers within a single panel"""
def __init__(self, data=Undefined, spec=Undefined, facet=Undefined, **kwargs):
_check_if_valid_subspec(spec, "FacetChart")
super(FacetChart, self).__init__(data=data, spec=spec, facet=facet, **kwargs)
def interactive(self, name=None, bind_x=True, bind_y=True):
"""Make chart axes scales interactive
Parameters
----------
name : string
The selection name to use for the axes scales. This name should be
unique among all selections within the chart.
bind_x : boolean, default True
If true, then bind the interactive scales to the x-axis
bind_y : boolean, default True
If true, then bind the interactive scales to the y-axis
Returns
-------
chart :
copy of self, with interactive axes added
"""
copy = self.copy(deep=False)
copy.spec = copy.spec.interactive(name=name, bind_x=bind_x, bind_y=bind_y)
return copy
def add_selection(self, *selections):
"""Add one or more selections to the chart."""
if not selections or self.spec is Undefined:
return self
copy = self.copy()
copy.spec = copy.spec.add_selection(*selections)
return copy
def topo_feature(url, feature, **kwargs):
"""A convenience function for extracting features from a topojson url
Parameters
----------
url : string
An URL from which to load the data set.
feature : string
The name of the TopoJSON object set to convert to a GeoJSON feature collection. For
example, in a map of the world, there may be an object set named `"countries"`.
Using the feature property, we can extract this set and generate a GeoJSON feature
object for each country.
**kwargs :
additional keywords passed to TopoDataFormat
"""
return core.UrlData(
url=url, format=core.TopoDataFormat(type="topojson", feature=feature, **kwargs)
)
def _combine_subchart_data(data, subcharts):
def remove_data(subchart):
if subchart.data is not Undefined:
subchart = subchart.copy()
subchart.data = Undefined
return subchart
if not subcharts:
# No subcharts = nothing to do.
pass
elif data is Undefined:
# Top level has no data; all subchart data must
# be identical to proceed.
subdata = subcharts[0].data
if subdata is not Undefined and all(c.data is subdata for c in subcharts):
data = subdata
subcharts = [remove_data(c) for c in subcharts]
else:
# Top level has data; subchart data must be either
# undefined or identical to proceed.
if all(c.data is Undefined or c.data is data for c in subcharts):
subcharts = [remove_data(c) for c in subcharts]
return data, subcharts
@utils.use_signature(core.SequenceParams)
def sequence(start, stop=None, step=Undefined, as_=Undefined, **kwds):
"""Sequence generator."""
if stop is None:
start, stop = 0, start
params = core.SequenceParams(start=start, stop=stop, step=step, **{"as": as_})
return core.SequenceGenerator(sequence=params, **kwds)
@utils.use_signature(core.GraticuleParams)
def graticule(**kwds):
"""Graticule generator."""
if not kwds:
# graticule: True indicates default parameters
graticule = True
else:
graticule = core.GraticuleParams(**kwds)
return core.GraticuleGenerator(graticule=graticule)
def sphere():
"""Sphere generator."""
return core.SphereGenerator(sphere=True)
| bsd-3-clause |
Leguark/pygeomod | pygeomod/struct_data.py | 3 | 17593 | """Analysis and modification of structural data exported from GeoModeller
All structural data from an entire GeoModeller project can be exported into ASCII
files using the function in the GUI:
Export -> 3D Structural Data
This method generates files for defined geological parameters:
"Points" (i.e. formation contact points) and
"Foliations" (i.e. orientations/ potential field gradients).
Exported parameters include all those defined in sections as well as 3D data points.
This package contains methods to check, visualise, and extract/modify parts of these
exported data sets, for example to import them into a different Geomodeller project.
"""
# import os, sys
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
class Struct3DPoints():
"""Class container for 3D structural points data sets"""
def __init__(self, **kwds):
"""Structural points data set
**Optional keywords**:
- *filename* = string : filename of csv file with exported points to load
"""
# store point information in purpose defined numpy record
self.ptype = np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32),
('formation', np.str_, 32)])
if kwds.has_key("filename"):
self.filename = kwds['filename']
# read data
self.parse()
self.get_formation_names()
self.get_range()
def parse(self):
"""Parse filename and load data into numpy record
The point information is stored in a purpose defined numpy record
self.points
"""
f = open(self.filename, "r")
lines = f.readlines()
self.header = lines[0]
# determine position of elements in header (for extension to foliations, etc.)
h_elem = np.array(self.header.rstrip().split(','))
x_id = np.where(h_elem == 'X')[0]
y_id = np.where(h_elem == 'Y')[0]
z_id = np.where(h_elem == 'Z')[0]
form_id = np.where(h_elem == 'formation')[0]
# print x_id
# create numpy array for points
self.len = (len(lines)-1)
self.points = np.ndarray(self.len, dtype = self.ptype)
for i,line in enumerate(lines[1:]):
l = line.rstrip().split(',')
self.points[i]['x'] = float(l[x_id])
self.points[i]['y'] = float(l[y_id])
self.points[i]['z'] = float(l[z_id])
self.points[i]['formation'] = l[form_id]
def get_formation_names(self):
"""Get names of all formations that have a point in this data set
and store in:
self.formation_names
"""
# self.formation_names = np.unique(self.formations)
self.formation_names = np.unique(self.points[:]['formation'])
def get_range(self):
"""Update min, max for all coordinate axes and store in
self.xmin, self.xmax, ..."""
self.xmin = np.min(self.points['x'])
self.ymin = np.min(self.points['y'])
self.zmin = np.min(self.points['z'])
self.xmax = np.max(self.points['x'])
self.ymax = np.max(self.points['y'])
self.zmax = np.max(self.points['z'])
def create_formation_subset(self, formation_names):
"""Create a subset (as another Struct3DPoints object) with specified formations only
**Arguments**:
- *formation_names* : list of formation names
**Returns**:
Struct3DPoints object with subset of points
"""
# create new object
# reference to own class type for consistency with Struct3DFoliations
pts_subset = self.__class__()
# determine ids for all points of these formations:
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
if type(formation_names) == list:
for formation in formation_names:
ids[self.points['formation'] == formation] = True
else:
ids[self.points['formation'] == formation_names] = True
# new length is identical to sum of ids bool array (all True elements)
pts_subset.len = np.sum(ids)
# extract points
pts_subset.points = self.points[ids]
# update range
pts_subset.get_range()
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def remove_formations(self, formation_names):
"""Remove points for specified formations from the point set
This function can be useful, for example, to remove one formation, perform
a thinning operation, and then add it back in with the `combine_with` function.
**Arguments**:
- *formation_names* = list of formations to be removed (or a single string to
remove only one formation)
"""
# Note: implementation is very similar to create_formation_subset, only inverse
# and changes in original point set!
# determine ids for all points of these formations:
ids = np.ndarray((self.len), dtype='bool')
ids[:] = True
if type(formation_names) == list:
for formation in formation_names:
ids[self.points['formation'] == formation] = False
else:
ids[self.points['formation'] == formation_names] = False
self.len = np.sum(ids)
# extract points
self.points = self.points[ids]
# update range
self.get_range()
# update formation names
self.get_formation_names()
def rename_formations(self, rename_dict):
"""Rename formation according to assignments in dictionary
Mapping in dictionary is of the form:
old_name_1 : new_name_1, old_name_2 : new_name_2, ...
"""
for k,v in rename_dict.items():
print("Change name from %s to %s" % (k,v))
for p in self.points:
if p['formation'] == k: p['formation'] = v
# update formation names
self.get_formation_names()
def extract_range(self, **kwds):
"""Extract subset for defined ranges
Pass ranges as keywords: from_x, to_x, from_y, to_y, from_z, to_z
All not defined ranges are simply kept as before
**Returns**:
pts_subset : Struct3DPoints data subset
"""
from_x = kwds.get("from_x", self.xmin)
from_y = kwds.get("from_y", self.ymin)
from_z = kwds.get("from_z", self.zmin)
to_x = kwds.get("to_x", self.xmax)
to_y = kwds.get("to_y", self.ymax)
to_z = kwds.get("to_z", self.zmax)
# create new object
# pts_subset = Struct3DPoints()
pts_subset = self.__class__()
# determine ids for points in range
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
ids[(self.points['x'] >= from_x) *
(self.points['y'] >= from_y) *
(self.points['z'] >= from_z) *
(self.points['x'] <= to_x) *
(self.points['y'] <= to_y) *
(self.points['z'] <= to_z)] = True
# new length is identical to sum of ids bool array (all True elements)
pts_subset.len = np.sum(ids)
# extract points
pts_subset.points = self.points[ids]
# update range
pts_subset.get_range()
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def thin(self, nx, ny, nz, **kwds):
"""Thin data for one formations on grid with defined number of cells and store as subset
**Arguments**:
- *nx*, *ny*, *nz* = int : number of cells in each direction for thinning grid
The thinning is performed on a raster and not 'formation-aware',
following this simple procedure:
(1) Iterate through grid
(2) If multiple points for formation in this cell: thin
(3a) If thin: Select one point in cell at random and keep this one!
(3b) else: if one point in raneg, keep it!
Note: Thinning is performed for all formations, so make sure to create a subset
for a single formation first!
**Returns**:
pts_subset = Struct3DPoints : subset with thinned data for formation
"""
# DEVNOTE: This would be an awesome function to parallelise! Should be quite simple!
# first step: generate subset
# pts_subset = self.create_formation_subset([formation])
# create new pointset:
# reference to own class type for consistency with Struct3DFoliations
pts_subset = self.__class__()
# determine cell boundaries of subset for thinning:
delx = np.ones(nx) * (self.xmax - self.xmin) / nx
bound_x = self.xmin + np.cumsum(delx)
dely = np.ones(ny) * (self.ymax - self.ymin) / ny
bound_y = self.ymin + np.cumsum(dely)
delz = np.ones(nz) * (self.zmax - self.zmin) / nz
bound_z = self.zmin + np.cumsum(delz)
ids_to_keep = []
for i in range(nx-1):
for j in range(ny-1):
for k in range(nz-1):
# determin number of points in this cell
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
ids[(self.points['x'] > bound_x[i]) *
(self.points['y'] > bound_y[j]) *
(self.points['z'] > bound_z[k]) *
(self.points['x'] < bound_x[i+1]) *
(self.points['y'] < bound_y[j+1]) *
(self.points['z'] < bound_z[k+1])] = True
if np.sum(ids) > 1:
# Thinning required!
# keep random point
ids_to_keep.append(numpy.random.choice(np.where(ids)[0]))
# pts_subset.points[nx * ny * i + ny * j + k] = self.points[id_to_keep]
# assign to new pointset:
elif np.sum(ids) == 1:
# keep the one point, of course!
# pts_subset.points[nx * ny * i + ny * j + k] = self.points[ids[0]]
ids_to_keep.append(ids[0])
# now get points for all those ids:
# extract points
pts_subset.points = self.points[np.array(ids_to_keep)]
# update range
pts_subset.get_range()
# update length
pts_subset.len = len(pts_subset.points)
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def combine_with(self, pts_set):
"""Combine this point set with another point set
**Arguments**:
- *pts_set* = Struct3DPoints : points set to combine
"""
self.points = np.concatenate((self.points, pts_set.points))
# update range and everything
self.get_range()
self.get_formation_names()
self.len = len(self.points)
def plot_plane(self, plane=('x','y'), **kwds):
"""Create 2-D plots for point distribution
**Arguments**:
- *plane* = tuple of plane axes directions, e.g. ('x','y') (default)
**Optional Keywords**:
- *ax* = matplotlib axis object: if provided, plot is attached to this axis
- *formation_names* = list of formations : plot only points for specific formations
"""
color = kwds.get("color", 'b')
if kwds.has_key("ax"):
# axis is provided, attach here
ax = kwds['ax']
else:
fig = plt.figure()
ax = fig.add_subplot(111)
if kwds.has_key("formation_names"):
pts_subset = self.create_formation_subset(kwds['formation_names'])
ax.plot(pts_subset.points[:][plane[0]], pts_subset.points[:][plane[1]], '.', color = color)
else:
ax.plot(self.points[:][plane[0]], self.points[:][plane[1]], '.', color = color)
def plot_3D(self, **kwds):
"""Create a plot of points in 3-D
**Optional keywords**:
- *ax* = matplotlib axis object: if provided, plot is attached to this axis
- *formation_names* = list of formations : plot only points for specific formations
"""
if kwds.has_key("ax"):
# axis is provided, attach here
ax = kwds['ax']
else:
fig = plt.figure(figsize = (10,8))
ax = fig.add_subplot(111, projection='3d')
if kwds.has_key("formation_names"):
# create a subset with speficied formations, only
pts_subset = self.create_formation_subset(kwds['formation_names'])
pts_subset.plot_3D(ax = ax)
else:
# plot all
ax.scatter(self.points['x'], self.points['y'], self.points['z'])
def save(self, filename):
"""Save points set to file
**Arguments**:
- *filename* = string : name of new file
"""
f = open(filename, 'w')
f.write(self.header)
for point in self.points:
f.write("%.2f,%.2f,%.3f,%s\n" % (point['x'], point['y'], point['z'], point['formation']))
f.close()
class Struct3DFoliations(Struct3DPoints):
"""Class container for foliations (i.e. orientations) exported from GeoModeller
Mainly based on Struct3DPoints as must required functionality
for location of elements - some functions overwritten, e.g. save and parse to read orientation data,
as well!
However, further methods might be added or adapted in the future, for example:
- downsampling according to (eigen)vector methods, e.g. the work from the Monash guys, etc.
- ploting of orientations in 2-D and 3-D
"""
def __init__(self, **kwds):
"""Structural points data set
**Optional keywords**:
- *filename* = string : filename of csv file with exported points to load
"""
# store point information in purpose defined numpy record
self.ftype = np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32),
('azimuth', np.float32),
('dip', np.float32),
('polarity', np.int),
('formation', np.str_, 32)])
if kwds.has_key("filename"):
self.filename = kwds['filename']
# read data
self.parse()
self.get_formation_names()
self.get_range()
def parse(self):
"""Parse filename and load data into numpy record
The point information is stored in a purpose defined numpy record
self.points
"""
f = open(self.filename, "r")
lines = f.readlines()
self.header = lines[0]
# determine position of elements in header (for extension to foliations, etc.)
h_elem = np.array(self.header.rstrip().split(','))
x_id = np.where(h_elem == 'X')[0]
y_id = np.where(h_elem == 'Y')[0]
z_id = np.where(h_elem == 'Z')[0]
azi_id = np.where(h_elem == 'azimuth')[0]
dip_id = np.where(h_elem == 'dip')[0]
pol_id = np.where(h_elem == 'polarity')[0]
form_id = np.where(h_elem == 'formation')[0]
# print x_id
# create numpy array for points
self.len = (len(lines)-1)
self.points = np.ndarray(self.len, dtype = self.ftype)
for i,line in enumerate(lines[1:]):
l = line.rstrip().split(',')
self.points[i]['x'] = float(l[x_id])
self.points[i]['y'] = float(l[y_id])
self.points[i]['z'] = float(l[z_id])
self.points[i]['azimuth'] = float(l[azi_id])
self.points[i]['dip'] = float(l[dip_id])
self.points[i]['polarity'] = float(l[pol_id])
self.points[i]['formation'] = l[form_id]
def save(self, filename):
"""Save points set to file
**Arguments**:
- *filename* = string : name of new file
"""
f = open(filename, 'w')
f.write(self.header)
for point in self.points:
f.write("%.2f,%.2f,%.3f,%.3f,%.3f,%d,%s\n" % (point['x'], point['y'], point['z'],
point['azimuth'], point['dip'], point['polarity'],
point['formation']))
f.close()
if __name__ == '__main__':
pass
| mit |
kubeflow/kfserving | python/kfserving/kfserving/models/v1beta1_model_spec.py | 1 | 6068 | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfserving.configuration import Configuration
class V1beta1ModelSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'framework': 'str',
'memory': 'ResourceQuantity',
'storage_uri': 'str'
}
attribute_map = {
'framework': 'framework',
'memory': 'memory',
'storage_uri': 'storageUri'
}
def __init__(self, framework=None, memory=None, storage_uri=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ModelSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._framework = None
self._memory = None
self._storage_uri = None
self.discriminator = None
self.framework = framework
if memory is not None:
self.memory = memory
self.storage_uri = storage_uri
@property
def framework(self):
"""Gets the framework of this V1beta1ModelSpec. # noqa: E501
Machine Learning <framework name> The values could be: \"tensorflow\",\"pytorch\",\"sklearn\",\"onnx\",\"xgboost\", \"myawesomeinternalframework\" etc. # noqa: E501
:return: The framework of this V1beta1ModelSpec. # noqa: E501
:rtype: str
"""
return self._framework
@framework.setter
def framework(self, framework):
"""Sets the framework of this V1beta1ModelSpec.
Machine Learning <framework name> The values could be: \"tensorflow\",\"pytorch\",\"sklearn\",\"onnx\",\"xgboost\", \"myawesomeinternalframework\" etc. # noqa: E501
:param framework: The framework of this V1beta1ModelSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and framework is None: # noqa: E501
raise ValueError("Invalid value for `framework`, must not be `None`") # noqa: E501
self._framework = framework
@property
def memory(self):
"""Gets the memory of this V1beta1ModelSpec. # noqa: E501
:return: The memory of this V1beta1ModelSpec. # noqa: E501
:rtype: ResourceQuantity
"""
return self._memory
@memory.setter
def memory(self, memory):
"""Sets the memory of this V1beta1ModelSpec.
:param memory: The memory of this V1beta1ModelSpec. # noqa: E501
:type: ResourceQuantity
"""
self._memory = memory
@property
def storage_uri(self):
"""Gets the storage_uri of this V1beta1ModelSpec. # noqa: E501
Storage URI for the model repository # noqa: E501
:return: The storage_uri of this V1beta1ModelSpec. # noqa: E501
:rtype: str
"""
return self._storage_uri
@storage_uri.setter
def storage_uri(self, storage_uri):
"""Sets the storage_uri of this V1beta1ModelSpec.
Storage URI for the model repository # noqa: E501
:param storage_uri: The storage_uri of this V1beta1ModelSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and storage_uri is None: # noqa: E501
raise ValueError("Invalid value for `storage_uri`, must not be `None`") # noqa: E501
self._storage_uri = storage_uri
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ModelSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ModelSpec):
return True
return self.to_dict() != other.to_dict()
| apache-2.0 |
ZENGXH/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 72 | 13586 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The compressed dataset size is around 14 Mb compressed. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_tfidf` function will in addition do a simple tf-idf
vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify an download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
DistrictDataLabs/yellowbrick | tests/test_features/test_pca.py | 1 | 12681 | # -*- coding: utf-8 -*-
# tests.test_features.test_pca
# Tests for the PCA based feature visualizer.
#
# Author: Carlo Morales
# Author: Raúl Peralta Lozada
# Author: Benjamin Bengfort
# Created: Tue May 23 18:34:27 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers.
# For license information, see LICENSE.txt
#
# ID: test_pca.py [] [email protected] $
"""
Tests for the PCA based feature visualizer.
"""
##########################################################################
## Imports
##########################################################################
import sys
import pytest
import numpy as np
import numpy.testing as npt
from unittest import mock
from tests.base import VisualTestCase, IS_WINDOWS_OR_CONDA
from yellowbrick.features.pca import *
from yellowbrick.exceptions import YellowbrickError, NotFitted
# Note: this can be removed when we deprecate mpl in #826
try:
# Only available in Matplotlib >= 2.0.2
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
make_axes_locatable = None
##########################################################################
# PCA Tests
##########################################################################
@pytest.mark.usefixtures("discrete", "continuous")
class TestPCA(VisualTestCase):
"""
Test the PCA visualizer
"""
def test_single(self):
"""
Test single target.
"""
visualizer = PCA(random_state=1998)
visualizer.fit(self.continuous.X)
visualizer.transform(self.continuous.X)
assert not hasattr(visualizer, "classes_")
assert not hasattr(visualizer, "range_")
self.assert_images_similar(visualizer)
@pytest.mark.xfail(IS_WINDOWS_OR_CONDA, reason="RMS of 10.205 on miniconda")
def test_continuous(self):
"""
Test continuous target
"""
visualizer = PCA(colormap="YlOrRd", random_state=2019)
assert not hasattr(visualizer, "range_")
visualizer.fit(*self.continuous)
visualizer.transform(*self.continuous)
assert hasattr(visualizer, "range_")
assert not hasattr(visualizer, "classes_")
visualizer.finalize()
visualizer.cax.set_yticklabels([])
# AppVeyor tests fail with RMS 10.085
self.assert_images_similar(visualizer, windows_tol=10.5)
def test_discrete(self):
"""
Test discrete target.
"""
classes = ["a", "b", "c", "d", "e"]
colors = ["r", "b", "g", "m", "c"]
visualizer = PCA(colors=colors, classes=classes, random_state=83)
assert not hasattr(visualizer, "classes_")
visualizer.fit(*self.discrete)
assert hasattr(visualizer, "classes_")
assert not hasattr(visualizer, "range_")
visualizer.transform(*self.discrete)
# Make sure that classes are set correctly.
npt.assert_array_equal(visualizer.classes_, classes)
self.assert_images_similar(visualizer)
def test_fit(self):
"""
Test that fit returns self.
"""
pca = PCA()
assert pca.fit(*self.discrete) is pca
@pytest.mark.parametrize("n_components", [2, 3])
def test_transform(self, n_components):
Xprime = PCA(projection=n_components).fit_transform(*self.continuous)
assert Xprime.shape == (500, n_components)
def test_transform_without_fit(self):
"""
Test that appropriate error is raised when transform called without fit.
"""
oz = PCA(projection=3)
msg = "instance is not fitted yet, please call fit"
with pytest.raises(NotFitted, match=msg):
oz.transform(*self.continuous)
@pytest.mark.xfail(IS_WINDOWS_OR_CONDA, reason="RMS of 12.115 on miniconda")
def test_pca_decomposition_quick_method(self):
"""
Test the quick method PCA visualizer
"""
visualizer = pca_decomposition(
*self.discrete, projection=2, scale=True, random_state=28, show=False
)
# AppVeyor tests fail with RMS 12.115
self.assert_images_similar(visualizer, windows_tol=12.5)
def test_scale_true_2d(self):
"""
Test the PCA visualizer 2 dimensions scaled.
"""
params = {"scale": True, "projection": 2, "random_state": 9932}
visualizer = PCA(**params).fit(*self.discrete)
pca_array = visualizer.transform(*self.discrete)
# Image comparison tests
self.assert_images_similar(visualizer)
# Assert PCA transformation occurred successfully
assert pca_array.shape == (self.discrete.X.shape[0], 2)
@pytest.mark.xfail(IS_WINDOWS_OR_CONDA, reason="RMS of 8.828 on miniconda")
def test_scale_false_2d(self):
"""
Test the PCA visualizer 2 dimensions non-scaled.
"""
params = {"scale": False, "projection": 2, "random_state": 1229}
visualizer = PCA(**params).fit(*self.continuous)
pca_array = visualizer.transform(*self.continuous)
visualizer.finalize()
visualizer.cax.set_yticklabels([])
# Image comparison tests
# AppVeyor tests fail with RMS 8.180
self.assert_images_similar(visualizer, tol=0.03, windows_tol=8.5)
# Assert PCA transformation occurred successfully
assert pca_array.shape == (self.continuous.X.shape[0], 2)
def test_biplot_2d(self):
"""
Test the PCA 2D biplot (proj_features).
"""
params = {
"features": list("ABCDEFGHIKLM"),
"random_state": 67,
"proj_features": True,
"projection": 2,
}
visualizer = PCA(**params).fit(self.discrete.X)
pca_array = visualizer.transform(self.discrete.X)
# Image comparison tests
self.assert_images_similar(visualizer, tol=5)
# Assert PCA transformation occurred successfully
assert pca_array.shape == (self.discrete.X.shape[0], 2)
def test_scale_true_3d(self):
"""
Test the PCA visualizer 3 dimensions scaled.
"""
params = {"scale": True, "projection": 3, "random_state": 7382}
visualizer = PCA(**params).fit(self.discrete.X)
pca_array = visualizer.transform(self.discrete.X)
# Image comparison tests
self.assert_images_similar(visualizer)
# Assert PCA transformation occurred successfully
assert pca_array.shape == (self.discrete.X.shape[0], 3)
def test_scale_false_3d(self):
"""
Test the PCA visualizer 3 dimensions non-scaled.
"""
params = {"scale": False, "projection": 3, "random_state": 98}
visualizer = PCA(**params).fit(self.discrete.X)
pca_array = visualizer.transform(self.discrete.X)
# Image comparison tests
self.assert_images_similar(visualizer)
# Assert PCA transformation occurred successfully
assert pca_array.shape == (self.discrete.X.shape[0], 3)
@pytest.mark.xfail(
sys.platform == "win32", reason="images not close on windows (RMSE=3)"
)
def test_biplot_3d(self):
"""
Test the PCA 3D biplot (proj_features).
"""
params = {
"features": list("ABCDEFGHIKLM"),
"random_state": 800,
"proj_features": True,
"projection": 3,
}
visualizer = PCA(**params).fit(*self.discrete)
pca_array = visualizer.transform(*self.discrete)
# Image comparison tests
self.assert_images_similar(visualizer, tol=5)
# Assert PCA transformation occurred successfully
assert pca_array.shape == (self.discrete.X.shape[0], 3)
def test_scale_true_4d_exception(self):
"""
Test PCA visualizer 4 dimensions scaled (catch YellowbrickError).
"""
params = {"scale": True, "projection": 4}
msg = "Projection dimensions must be either 2 or 3"
with pytest.raises(YellowbrickError, match=msg):
PCA(**params)
def test_scale_true_3d_exception(self):
"""
Test PCA visualizer 3 dims scaled on 2 dim data set (catch ValueError).
"""
X = np.random.normal(loc=2, size=(100, 2))
params = {"scale": True, "projection": 3}
e = r"n_components=3 must be between 0 and min\(n_samples, n_features\)=2"
with pytest.raises(ValueError, match=e):
pca = PCA(**params)
pca.fit(X)
@mock.patch("yellowbrick.features.pca.plt.sca", autospec=True)
def test_alpha_param(self, mock_sca):
"""
Test that the user can supply an alpha param on instantiation
"""
# Instantiate a prediction error plot, provide custom alpha
params = {"alpha": 0.3, "projection": 2, "random_state": 9932}
visualizer = PCA(**params).fit(self.discrete.X)
pca_array = visualizer.transform(self.discrete.X)
assert visualizer.alpha == 0.3
visualizer.ax = mock.MagicMock()
visualizer.fit(self.discrete.X)
visualizer.transform(self.discrete.X)
# Test that alpha was passed to internal matplotlib scatterplot
_, scatter_kwargs = visualizer.ax.scatter.call_args
assert "alpha" in scatter_kwargs
assert scatter_kwargs["alpha"] == 0.3
assert pca_array.shape == (self.discrete.X.shape[0], 2)
@pytest.mark.xfail(IS_WINDOWS_OR_CONDA, reason="RMS of 7.332 on miniconda")
def test_colorbar(self):
"""
Test the PCA visualizer's colorbar features.
"""
params = {
"scale": True,
"projection": 2,
"random_state": 7382,
"color": self.discrete.y,
"colorbar": True,
}
visualizer = PCA(**params).fit(*self.continuous)
visualizer.transform(self.continuous.X, self.continuous.y)
visualizer.finalize()
visualizer.cax.set_yticklabels([])
# Image comparison tests
# AppVeyor tests fail with RMS of 7.280
self.assert_images_similar(visualizer, windows_tol=7.5)
@pytest.mark.xfail(IS_WINDOWS_OR_CONDA, reason="RMS of 14.515 on miniconda")
def test_heatmap(self):
"""
Test the PCA visualizer's heatmap features.
"""
params = {
"scale": True,
"projection": 2,
"random_state": 7382,
"color": self.discrete.y,
"heatmap": True,
}
visualizer = PCA(**params).fit(self.discrete.X, self.discrete.y)
visualizer.transform(self.discrete.X, self.discrete.y)
visualizer.finalize()
# TODO: manually modifying ticks should be removed after #916 is fixed
visualizer.lax.set_xticks([])
visualizer.lax.set_yticks([])
visualizer.lax.set_xticks([], minor=True)
visualizer.uax.set_xticklabels([])
# Image comparison tests
# AppVeyor tests fail with RMS 14.492
self.assert_images_similar(visualizer, windows_tol=14.5)
@pytest.mark.xfail(IS_WINDOWS_OR_CONDA, reason="RMS of 10.987 on miniconda")
def test_colorbar_heatmap(self):
"""
Test the PCA visualizer with both colorbar and heatmap.
"""
params = {
"scale": True,
"projection": 2,
"random_state": 7382,
"color": self.discrete.y,
"colorbar": True,
"heatmap": True,
}
visualizer = PCA(**params).fit(self.continuous.X, self.continuous.y)
visualizer.transform(self.continuous.X, self.continuous.y)
visualizer.finalize()
# TODO: manually modifying ticks should be removed after #916 is fixed
visualizer.lax.set_xticks([])
visualizer.lax.set_yticks([])
visualizer.lax.set_xticks([], minor=True)
visualizer.uax.set_xticklabels([])
visualizer.cax.set_yticklabels([])
# Image comparison tests
# AppVeyor tests fail with RMS 10.331
self.assert_images_similar(visualizer, windows_tol=10.5)
def test_3d_heatmap_enabled_error(self):
"""
Assert an exception if colorbar and heatmap is enabled with 3-dimensions
"""
with pytest.raises(YellowbrickValueError):
PCA(projection=3, heatmap=True)
@pytest.mark.skipif(
make_axes_locatable is not None, reason="requires matplotlib <= 2.0.1"
)
def test_matplotlib_version_error():
"""
Assert an exception is raised with incompatible matplotlib versions
"""
with pytest.raises(YellowbrickValueError):
PCA(colorbar=True, heatmap=True)
| apache-2.0 |
pkonarzewski/data-processors | tests/test_generic.py | 1 | 2637 | # -- coding: utf-8 -*-
import unittest
from pandas import Series, DataFrame
from numpy import NaN
import pandas.util.testing as pdtest
from processors import generic
class TestBroadcast(unittest.TestCase):
def setUp(self):
self.s1 = Series([1, 2, 3])
self.s2 = Series([3, 2, 1])
self.s3 = Series([6, 6])
self.s4 = Series([10])
def test_broadcast_short_long(self):
pdtest.assert_series_equal(generic.broadcast_to(self.s4, self.s1),
Series([10, 10, 10]),
check_names=False)
def test_broadcast_same_len(self):
pdtest.assert_series_equal(generic.broadcast_to(self.s1, self.s2),
self.s1,
check_names=False)
def test_broadcast_incorect_shape(self):
with self.assertRaises(ValueError):
generic.broadcast_to(self.s3, self.s1)
class TestAlignSeries(unittest.TestCase):
def setUp(self):
self.s1 = Series(['a', 'b', 'c'])
self.s2 = Series(['d'])
self.s3 = Series(['z', 'x', 'y'])
self.s4 = Series(['u', 'u'])
def test_align_brodcast_right(self):
ns1, ns2 = generic.align_series(self.s1, self.s2)
assert len(ns1) == len(ns2), 'diff length'
pdtest.assert_series_equal(ns2, Series(['d', 'd', 'd']),
check_names=False)
def test_align_brodcast_left(self):
ns2, ns3 = generic.align_series(self.s2, self.s3)
assert len(ns2) == len(ns3), 'diff length'
pdtest.assert_series_equal(ns2, Series(['d', 'd', 'd']),
check_names=False)
def test_align_same_len(self):
ns1, ns3 = generic.align_series(self.s1, self.s3)
assert len(ns1) == len(ns3), 'diff length'
pdtest.assert_series_equal(ns1, self.s1, check_names=False)
pdtest.assert_series_equal(ns3, self.s3, check_names=False)
def test_invalid_align(self):
with self.assertRaises(ValueError):
generic.align_series(self.s3, self.s4)
class TestSplitNumbersFunc(unittest.TestCase):
def test_split_correct(self):
tested = Series(['123', '456', '789', '12', '1234'])
result = DataFrame([(1., 2., 3.),
(4., 5., 6.),
(7., 8., 9.),
(NaN, NaN, NaN),
(NaN, NaN, NaN)], columns=[0, 1, 2])
pdtest.assert_frame_equal(
generic.split_numbers_to_columns(tested, r'^(\d)(\d)(\d)$'),
result
)
| mit |
Ziqi-Li/bknqgis | geopandas/geopandas/tests/test_dissolve.py | 1 | 3126 | from __future__ import absolute_import
import tempfile
import shutil
import numpy as np
from shapely.geometry import Point
import geopandas
from geopandas import GeoDataFrame, read_file
from geopandas.tools import overlay
from .util import unittest
from pandas.util.testing import assert_frame_equal
from pandas import Index
import pandas as pd
class TestDataFrame(unittest.TestCase):
def setUp(self):
nybb_filename = geopandas.datasets.get_path('nybb')
self.polydf = read_file(nybb_filename)
self.polydf = self.polydf[['geometry', 'BoroName', 'BoroCode']]
self.polydf = self.polydf.rename(columns={'geometry':'myshapes'})
self.polydf = self.polydf.set_geometry('myshapes')
self.polydf['manhattan_bronx'] = 5
self.polydf.loc[3:4,'manhattan_bronx']=6
# Merged geometry
manhattan_bronx = self.polydf.loc[3:4,]
others = self.polydf.loc[0:2,]
collapsed = [others.geometry.unary_union, manhattan_bronx.geometry.unary_union]
merged_shapes = GeoDataFrame({'myshapes': collapsed}, geometry='myshapes',
index=Index([5,6], name='manhattan_bronx'))
# Different expected results
self.first = merged_shapes.copy()
self.first['BoroName'] = ['Staten Island', 'Manhattan']
self.first['BoroCode'] = [5, 1]
self.mean = merged_shapes.copy()
self.mean['BoroCode'] = [4,1.5]
def test_geom_dissolve(self):
test = self.polydf.dissolve('manhattan_bronx')
self.assertTrue(test.geometry.name == 'myshapes')
self.assertTrue(test.geom_almost_equals(self.first).all())
def test_dissolve_retains_existing_crs(self):
assert self.polydf.crs is not None
test = self.polydf.dissolve('manhattan_bronx')
assert test.crs is not None
def test_dissolve_retains_nonexisting_crs(self):
self.polydf.crs = None
test = self.polydf.dissolve('manhattan_bronx')
assert test.crs is None
def test_first_dissolve(self):
test = self.polydf.dissolve('manhattan_bronx')
assert_frame_equal(self.first, test, check_column_type=False)
def test_mean_dissolve(self):
test = self.polydf.dissolve('manhattan_bronx', aggfunc='mean')
assert_frame_equal(self.mean, test, check_column_type=False)
test = self.polydf.dissolve('manhattan_bronx', aggfunc=np.mean)
assert_frame_equal(self.mean, test, check_column_type=False)
def test_multicolumn_dissolve(self):
multi = self.polydf.copy()
multi['dup_col'] = multi.manhattan_bronx
multi_test = multi.dissolve(['manhattan_bronx', 'dup_col'], aggfunc='first')
first = self.first.copy()
first['dup_col'] = first.index
first = first.set_index([first.index, 'dup_col'])
assert_frame_equal(multi_test, first, check_column_type=False)
def test_reset_index(self):
test = self.polydf.dissolve('manhattan_bronx', as_index=False)
comparison = self.first.reset_index()
assert_frame_equal(comparison, test, check_column_type=False)
| gpl-2.0 |
xiaoxiamii/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp select tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
| agpl-3.0 |
citp/BlockSci | blockscipy/blocksci/__init__.py | 1 | 32553 | # -*- coding: utf-8 -*-
"""BlockSci Module
BlockSci enables fast and expressive analysis of Bitcoin’s and many
other blockchains.
"""
import tempfile
import importlib
import subprocess
import sys
import os
import logging
import inspect
import copy
import io
import re
import heapq
import operator
import time
from functools import reduce
import psutil
from multiprocess import Pool
import dateparser
from dateutil.relativedelta import relativedelta
import pandas as pd
from ._blocksci import *
from ._blocksci import _traverse
from .currency import *
from .blockchain_info import *
from .opreturn import label_application
from .pickler import *
VERSION = "0.7.0"
sys.modules['blocksci.proxy'] = proxy
sys.modules['blocksci.cluster'] = cluster
sys.modules['blocksci.heuristics'] = heuristics
sys.modules['blocksci.heuristics.change'] = heuristics.change
class _NoDefault:
def __repr__(self):
return '(no default)'
MISSING_PARAM = _NoDefault()
# Alert the user if the disk space is getting full
disk_info = os.statvfs("/")
free_space = (disk_info.f_frsize * disk_info.f_bavail) // (1024**3)
if free_space < 20:
logger = logging.getLogger()
logger.warning("Warning: You only have {}GB of free disk space left. Running out of disk space may crash the parser and corrupt the BlockSci data files.".format(free_space))
# UTC time zone is recommended
if time.tzname != ('UTC', 'UTC'):
logger = logging.getLogger()
logger.warning("Warning: Your system is set to a timezone other than UTC, leading to inconsistencies between datetime objects (which are adjusted to your local timezone) and datetime64 timestamps returned by iterators and ranges, or the fluent interface (which use UTC).")
def mapreduce_block_ranges(chain, map_func, reduce_func, init=MISSING_PARAM, start=None, end=None, cpu_count=psutil.cpu_count()):
"""Initialized multithreaded map reduce function over a stream of block ranges
"""
if start is None:
start = 0
if end is None:
end = len(chain)
elif isinstance(start, str):
blocks = chain.range(start, end)
start = blocks[0].height
end = blocks[-1].height
if cpu_count == 1:
return mapFunc(chain[start:end])
raw_segments = chain._segment_indexes(start, end, cpu_count)
segments = [(raw_segment, chain.config_location, len(chain)) for raw_segment in raw_segments]
def real_map_func(input):
local_chain = Blockchain(input[1], input[2])
file = io.BytesIO()
pickler = Pickler(file)
mapped = map_func(local_chain[input[0][0]:input[0][1]])
pickler.dump(mapped)
file.seek(0)
return file
with Pool(cpu_count - 1) as p:
results_future = p.map_async(real_map_func, segments[1:])
first = map_func(chain[raw_segments[0][0]:raw_segments[0][1]])
results = results_future.get()
results = [Unpickler(res, chain).load() for res in results]
results.insert(0, first)
if isinstance(init, type(MISSING_PARAM)):
return reduce(reduce_func, results)
else:
return reduce(reduce_func, results, init)
def mapreduce_blocks(chain, map_func, reduce_func, init=MISSING_PARAM, start=None, end=None, cpu_count=psutil.cpu_count()):
"""Initialized multithreaded map reduce function over a stream of blocks
"""
def map_range_func(blocks):
if isinstance(init, type(MISSING_PARAM)):
return reduce(reduce_func, (map_func(block) for block in blocks))
else:
return reduce(
reduce_func,
(map_func(block) for block in blocks),
copy.deepcopy(init)
)
return mapreduce_block_ranges(
chain,
map_range_func,
reduce_func,
init,
start,
end,
cpu_count
)
def mapreduce_txes(chain, map_func, reduce_func, init=MISSING_PARAM, start=None, end=None, cpu_count=psutil.cpu_count()):
"""Initialized multithreaded map reduce function over a stream of transactions
"""
def map_range_func(blocks):
if isinstance(init, type(MISSING_PARAM)):
return reduce(
reduce_func,
(map_func(tx) for block in blocks for tx in block)
)
else:
return reduce(
reduce_func,
(map_func(tx) for block in blocks for tx in block),
copy.deepcopy(init)
)
return mapreduce_block_ranges(
chain,
map_range_func,
reduce_func,
init,
start,
end,
cpu_count
)
def map_blocks(self, block_func, start=None, end=None, cpu_count=psutil.cpu_count()):
"""Runs the given function over each block in range and returns a list of the results
"""
def map_func(blocks):
return [block_func(block) for block in blocks]
def reduce_func(accum, new_val):
accum.extend(new_val)
return accum
return mapreduce_block_ranges(
self,
map_func,
reduce_func,
MISSING_PARAM,
start,
end,
cpu_count
)
def filter_blocks(
self, filter_func, start=None, end=None, cpu_count=psutil.cpu_count()
):
"""Return all blocks in range which match the given criteria
"""
def map_func(blocks):
return blocks.where(filter_func).to_list()
def reduce_func(accum, new_val):
accum.extend(new_val)
return accum
return mapreduce_block_ranges(
self, map_func, reduce_func, MISSING_PARAM, start, end, cpu_count=cpu_count
)
def filter_blocks_legacy(
self, filter_func, start=None, end=None, cpu_count=psutil.cpu_count()
):
"""Return all blocks in range which match the given criteria
"""
def map_func(blocks):
return [block for block in blocks if filter_func(block)]
def reduce_func(accum, new_val):
accum.extend(new_val)
return accum
return mapreduce_block_ranges(
self, map_func, reduce_func, MISSING_PARAM, start, end, cpu_count=cpu_count
)
def filter_txes(self, filter_func, start=None, end=None, cpu_count=psutil.cpu_count()):
"""Return all transactions in range which match the given criteria
"""
def map_func(blocks):
return blocks.txes.where(filter_func).to_list()
def reduce_func(accum, new_val):
accum.extend(new_val)
return accum
return mapreduce_block_ranges(
self, map_func, reduce_func, MISSING_PARAM, start, end, cpu_count
)
def filter_txes_legacy(
self, filter_func, start=None, end=None, cpu_count=psutil.cpu_count()
):
"""Return all transactions in range which match the given criteria
"""
def map_func(blocks):
return [tx for block in blocks for tx in block if filter_func(tx)]
def reduce_func(accum, new_val):
accum.extend(new_val)
return accum
return mapreduce_block_ranges(
self, map_func, reduce_func, MISSING_PARAM, start, end, cpu_count
)
Blockchain.map_blocks = map_blocks
Blockchain.filter_blocks = filter_blocks
Blockchain.filter_blocks_legacy = filter_blocks_legacy
Blockchain.filter_txes = filter_txes
Blockchain.filter_txes_legacy = filter_txes_legacy
Blockchain.mapreduce_block_ranges = mapreduce_block_ranges
Blockchain.mapreduce_blocks = mapreduce_blocks
Blockchain.mapreduce_txes = mapreduce_txes
def heights_to_dates(self, df):
"""
Convert a pandas data frame with a block height index into a frame with a block time index
"""
return df.set_index(df.index.to_series().apply(lambda x: self[x].time))
def block_range(self, start, end=None) -> BlockRange:
"""
Return the range of blocks mined between the given dates
"""
if self.block_times is None:
self.block_times = pd.DataFrame([block.time for block in self], columns=["date"])
self.block_times["height"] = self.block_times.index
self.block_times.index = self.block_times["date"]
del self.block_times["date"]
start_date = pd.to_datetime(start)
if end is None:
res = dateparser.DateDataParser().get_date_data(start)
if res['period'] == 'month':
end = start_date + relativedelta(months=1)
elif res['period'] == 'day':
end = start_date + relativedelta(days=1)
elif res['period'] == 'year':
end = start_date + relativedelta(years=1)
else:
end = pd.to_datetime(end)
oldest = self.block_times[self.block_times.index >= start_date].iloc[0][0]
newest = self.block_times[self.block_times.index <= end].iloc[-1][0] + 1
return self[oldest:newest]
old_init = Blockchain.__init__
def new_init(self, loc, max_block=0):
if max_block == 0:
old_init(self, loc)
else:
old_init(self, loc, max_block)
self.block_times = None
ec2_instance_path = "/home/ubuntu/BlockSci/IS_EC2"
tx_heated_path = "/home/ubuntu/BlockSci/TX_DATA_HEATED"
scripts_heated_path = "/home/ubuntu/BlockSci/SCRIPT_DATA_HEATED"
index_heated_path = "/home/ubuntu/BlockSci/INDEX_DATA_HEATED"
if os.path.exists(ec2_instance_path):
if not os.path.exists(tx_heated_path):
print("Note: this appears to be a fresh instance. Transaction data has not yet been cached locally. Most queries might be slow. Caching is currently ongoing in the background, and usually takes 20 minutes.")
elif not os.path.exists(scripts_heated_path):
print("Note: this appears to be a fresh instance. Script data has not yet been cached locally. Some queries might be slow. Caching is currently ongoing in the background, and usually takes 1.5 hours.")
elif not os.path.exists(index_heated_path):
print("Note: this appears to be a fresh instance. Index data has not yet been cached locally. A few queries might be slow. Caching is currently ongoing in the background, and usually takes 3.5 hours.")
def most_valuable_addresses(self, nlargest=100):
current_address_vals = self.blocks.outputs.where(lambda o: ~o.is_spent) \
.group_by( \
lambda output: output.address, \
lambda outputs: outputs.value.sum \
)
return heapq.nlargest(nlargest, current_address_vals.items(), key=operator.itemgetter(1))
Blockchain.__init__ = new_init
Blockchain.range = block_range
Blockchain.heights_to_dates = heights_to_dates
Blockchain.most_valuable_addresses = most_valuable_addresses
def traverse(proxy_func, val):
return _traverse(proxy_func(val._self_proxy), val)
def apply_map(prox, prop):
if prop.ptype == proxy.proxy_type.optional:
return prox._map_optional(prop)
elif prop.ptype == proxy.proxy_type.iterator:
return prox._map_sequence(prop)
elif prop.ptype == proxy.proxy_type.range:
return prox._map_sequence(prop)
else:
return prox._map(prop)
def setup_optional_proxy_map_funcs():
def optional_map_func(r, func):
p = func(r.nested_proxy)
return r._map(p)
optional_cls = [x for x in dir(proxy) if 'Optional' in x and x[0].isupper()]
for cl in optional_cls:
getattr(proxy, cl).map = optional_map_func
def setup_sequence_map_funcs():
def range_map_func(r, func):
p = func(r._self_proxy.nested_proxy)
return apply_map(r._self_proxy, p)(r)
def range_where_func(r, func):
p = func(r._self_proxy.nested_proxy)
return r._self_proxy._where(p)(r)
def range_max_func(r, func):
p = func(r._self_proxy.nested_proxy)
return r._self_proxy._max(p)(r)
def range_min_func(r, func):
p = func(r._self_proxy.nested_proxy)
return r._self_proxy._min(p)(r)
def range_any_func(r, func):
p = func(r._self_proxy.nested_proxy)
return r._self_proxy._any(p)(r)
def range_all_func(r, func):
p = func(r._self_proxy.nested_proxy)
return r._self_proxy._all(p)(r)
def range_group_by_func(r, grouper_func, evaler_func):
grouper = grouper_func(r._self_proxy.nested_proxy)
evaler = evaler_func(r._self_proxy.nested_proxy.range_proxy)
return r._group_by(grouper, evaler)
iterator_and_range_cls = [x for x in globals() if ('Iterator' in x or 'Range' in x) and x[0].isupper()]
for cl in iterator_and_range_cls:
globals()[cl].map = range_map_func
globals()[cl].select = range_map_func
globals()[cl].where = range_where_func
globals()[cl].group_by = range_group_by_func
globals()[cl].max = range_max_func
globals()[cl].min = range_min_func
globals()[cl].any = range_any_func
globals()[cl].all = range_all_func
def setup_sequence_proxy_map_funcs():
def range_map_func(r, func):
p = func(r.nested_proxy)
return apply_map(r, p)
def range_where_func(r, func):
p = func(r.nested_proxy)
return r._where(p)
def range_max_func(r, func):
p = func(r.nested_proxy)
return r._max(p)
def range_min_func(r, func):
p = func(r.nested_proxy)
return r._min(p)
def range_any_func(r, func):
p = func(r.nested_proxy)
return r._any(p)
def range_all_func(r, func):
p = func(r.nested_proxy)
return r._all(p)
iterator_and_range_cls = [x for x in dir(proxy) if ('Iterator' in x or 'Range' in x) and x[0].isupper()]
for cl in iterator_and_range_cls:
getattr(proxy, cl).map = range_map_func
getattr(proxy, cl).select = range_map_func
getattr(proxy, cl).where = range_where_func
getattr(proxy, cl).max = range_max_func
getattr(proxy, cl).min = range_min_func
getattr(proxy, cl).any = range_any_func
getattr(proxy, cl).all = range_all_func
non_copying_methods = set(["ptype", "iterator_proxy", "range_proxy", "optional_proxy", "output_type_name"])
def _get_core_functions_methods(obj):
return (attr for attr in obj.__dict__ if
not attr[:2] == '__' and attr not in non_copying_methods and
not isinstance(getattr(obj, attr, None), property))
def _get_core_properties_methods(obj):
return (attr for attr in obj.__dict__ if attr not in non_copying_methods and
isinstance(getattr(obj, attr, None), property))
def _get_functions_methods(obj):
return (attr for attr in dir(obj) if
not attr[:2] == '__' and attr not in non_copying_methods and
not isinstance(getattr(obj, attr, None), property))
def _get_properties_methods(obj):
return (attr for attr in dir(obj) if attr not in non_copying_methods and
isinstance(getattr(obj, attr, None), property))
# https://gist.github.com/carlsmith/b2e6ba538ca6f58689b4c18f46fef11c
def replace(string, substitutions):
substrings = sorted(substitutions, key=len, reverse=True)
regex = re.compile('|'.join(map(re.escape, substrings)))
return regex.sub(lambda match: substitutions[match.group(0)], string)
def fix_all_doc_def(doc):
doc = replace(doc, {
"blocksci.proxy.intIteratorProxy": "numpy.ndarray[int]",
"blocksci.proxy.intRangeProxy": "numpy.ndarray[int]",
"blocksci.proxy.boolIteratorProxy": "numpy.ndarray[bool]",
"blocksci.proxy.boolRangeProxy": "numpy.ndarray[bool]",
"blocksci.proxy.ClusterIteratorProxy": "blocksci.cluster.ClusterIterator",
"blocksci.proxy.TaggedClusterIteratorProxy": "blocksci.cluster.TaggedClusterIterator",
"blocksci.proxy.TaggedAddressIteratorProxy": "blocksci.cluster.TaggedAddressIterator"
})
doc = re.sub(r"(blocksci\.proxy\.)([a-zA-Z]+)(IteratorProxy)", r"blocksci.\2Iterator", doc)
return doc
def fix_self_doc_def(doc):
doc = fix_all_doc_def(doc)
doc = replace(doc, {
"blocksci.proxy.ProxyAddress": "blocksci.Address",
"blocksci.proxy.intProxy": "int",
"blocksci.proxy.boolProxy": "bool",
"blocksci.proxy.ClusterProxy": "blocksci.cluster.Cluster",
"blocksci.proxy.TaggedClusterProxy": "blocksci.cluster.TaggedCluster",
"blocksci.proxy.TaggedAddressProxy": "blocksci.cluster.TaggedAddress",
"blocksci.proxy.ClusterRangeProxy": "blocksci.cluster.ClusterRange",
"blocksci.proxy.TaggedClusterRangeProxy": "blocksci.cluster.TaggedClusterRange",
"blocksci.proxy.TaggedAddressRangeProxy": "blocksci.cluster.TaggedAddressRange"
})
doc = re.sub(r"(blocksci\.proxy\.Optional)([a-zA-Z]+)(Proxy)", r"Optional\[blocksci\.\2\]", doc)
doc = re.sub(r"(blocksci\.proxy\.)([a-zA-Z]+)(RangeProxy)", r"blocksci.\2Range", doc)
doc = re.sub(r"(blocksci\.proxy\.)([a-zA-Z]+)(Proxy)", r"blocksci.\2", doc)
return doc
def fix_sequence_doc_def(doc):
doc = fix_all_doc_def(doc)
doc = replace(doc, {
"blocksci.proxy.intProxy": "numpy.ndarray[int]",
"blocksci.proxy.boolProxy": "numpy.ndarray[bool]",
"blocksci.proxy.ClusterRangeProxy": "blocksci.cluster.ClusterIterator",
"blocksci.proxy.TaggedClusterRangeProxy": "blocksci.cluster.TaggedClusterIterator",
"blocksci.proxy.TaggedAddressRangeProxy": "blocksci.cluster.ClusterIterator"
})
doc = re.sub(r"(blocksci\.proxy\.Optional)([a-zA-Z]+)(Proxy)", r"blocksci\.\2Iterator", doc)
doc = re.sub(r"(blocksci\.proxy\.)([a-zA-Z]+)(RangeProxy)", r"blocksci.\2Iterator", doc)
return doc
def fix_iterator_doc_def(doc):
doc = fix_sequence_doc_def(doc)
doc = replace(doc, {
"blocksci.proxy.ClusterProxy": "blocksci.cluster.ClusterIterator",
"blocksci.proxy.TaggedClusterProxy": "blocksci.cluster.TaggedClusterIterator",
"blocksci.proxy.TaggedAddressProxy": "blocksci.cluster.TaggedAddressIterator"
})
doc = re.sub(r"(blocksci\.proxy\.)([a-zA-Z]+)(Proxy)", r"blocksci\.\2Iterator", doc)
return doc
def fix_range_doc_def(doc):
doc = fix_sequence_doc_def(doc)
doc = replace(doc, {
"blocksci.proxy.ClusterProxy": "blocksci.cluster.ClusterRange",
"blocksci.proxy.TaggedClusterProxy": "blocksci.cluster.TaggedClusterRange",
"blocksci.proxy.TaggedAddressProxy": "blocksci.cluster.TaggedAddressRange"
})
doc = re.sub(r"(blocksci\.proxy\.)([a-zA-Z]+)(Proxy)", r"blocksci\.\2Range", doc)
return doc
def setup_self_methods(main, proxy_obj_type=None, sample_proxy=None):
if proxy_obj_type is None:
proxy_obj_type = type(main._self_proxy)
if sample_proxy is None:
sample_proxy = main._self_proxy
# ignore properties that already exist (normal pybind11 binding)
existing_properties = set(dir(main))
def self_property_creator(name):
prop = property(lambda s: getattr(s._self_proxy, name)(s))
prop.__doc__ = str(getattr(proxy_obj_type, name).__doc__) + "\n\n:type: :class:`" + getattr(sample_proxy, name).output_type_name + "`"
return prop
def self_method_creator(name):
def method(s, *args):
return getattr(s._self_proxy, name)(*args)(s)
orig_doc = getattr(proxy_obj_type, name).__doc__
split = orig_doc.split("\n\n")
method.__doc__ = fix_self_doc_def(split[0]) + '\n\n' + split[1]
return method
core_properties_methods = set(_get_core_properties_methods(proxy_obj_type)) - existing_properties
core_functions_methods = set(_get_core_functions_methods(proxy_obj_type)) - existing_properties
for proxy_func in core_properties_methods:
setattr(main, proxy_func, self_property_creator(proxy_func))
for proxy_func in core_functions_methods:
setattr(main, proxy_func, self_method_creator(proxy_func))
def setup_iterator_methods(iterator, doc_func=fix_iterator_doc_def, nested_proxy_cl=None, sample_proxy=None):
if nested_proxy_cl is None:
nested_proxy_cl = type(iterator._self_proxy.nested_proxy)
sample_proxy = iterator._self_proxy
def iterator_creator(name):
def method(s):
return apply_map(s._self_proxy, getattr(s._self_proxy.nested_proxy, name))(s)
prop = property(method)
prop.__doc__ = "For each item: " + \
getattr(nested_proxy_cl, name).__doc__ + \
"\n\n:type: :class:`" + \
apply_map(sample_proxy, getattr(sample_proxy.nested_proxy, name)).output_type_name + \
"`"
return prop
def iterator_method_creator(name):
def method(rng, *args):
return apply_map(rng._self_proxy, getattr(rng._self_proxy.nested_proxy, name)(*args))(rng)
orig_doc = getattr(nested_proxy_cl, name).__doc__
split = orig_doc.split("\n\n")
if len(split) != 2:
print(iterator, name)
method.__doc__ = doc_func(split[0]) + '\n\nFor each item: ' + split[1]
return method
for proxy_func in _get_core_properties_methods(nested_proxy_cl):
setattr(iterator, proxy_func, iterator_creator(proxy_func))
for proxy_func in _get_core_functions_methods(nested_proxy_cl):
setattr(iterator, proxy_func, iterator_method_creator(proxy_func))
def setup_iterator_proxy_methods(iterator_proxy):
proxy_cl = type(iterator_proxy)
nested_proxy_cl = type(iterator_proxy.nested_proxy)
def iterator_proxy_creator(name):
def method(rng):
return apply_map(rng, getattr(rng.nested_proxy, name))
return property(method)
def iterator_proxy_method_creator(name):
def method(rng, *args):
return apply_map(rng, getattr(rng.nested_proxy, name)(*args))
return method
for proxy_func in _get_core_properties_methods(nested_proxy_cl):
setattr(proxy_cl, proxy_func, iterator_proxy_creator(proxy_func))
for proxy_func in _get_core_functions_methods(nested_proxy_cl):
setattr(proxy_cl, proxy_func, iterator_proxy_method_creator(proxy_func))
def setup_size_property(iterator):
iterator.count = property(lambda rng: rng._self_proxy.size(rng))
iterator.size = property(lambda rng: rng._self_proxy.size(rng))
def setup_range_methods(blocksci_range, nested_proxy_cl=None, sample_proxy=None):
setup_iterator_methods(blocksci_range, fix_range_doc_def, nested_proxy_cl, sample_proxy)
blocksci_range.__getitem__ = lambda rng, index: rng._self_proxy[index](rng)
def setup_iterator_and_proxy_methods(iterator):
setup_iterator_methods(iterator)
setup_iterator_proxy_methods(iterator._self_proxy)
setup_size_property(iterator)
def setup_range_and_proxy_methods(blocksci_range):
setup_range_methods(blocksci_range)
setup_iterator_proxy_methods(blocksci_range._self_proxy)
setup_size_property(blocksci_range)
setup_optional_proxy_map_funcs()
setup_sequence_proxy_map_funcs()
setup_sequence_map_funcs()
setup_self_methods(Block)
setup_self_methods(Tx)
setup_self_methods(Output)
setup_self_methods(Input)
setup_self_methods(EquivAddress)
setup_self_methods(Address, proxy.ProxyAddress, PubkeyAddress._self_proxy)
setup_self_methods(PubkeyAddress)
setup_self_methods(PubkeyHashAddress)
setup_self_methods(WitnessPubkeyHashAddress)
setup_self_methods(MultisigPubkey)
setup_self_methods(ScriptHashAddress)
setup_self_methods(WitnessScriptHashAddress)
setup_self_methods(MultisigAddress)
setup_self_methods(NonStandardAddress)
setup_self_methods(OpReturn)
setup_self_methods(WitnessUnknownAddress)
setup_self_methods(cluster.Cluster)
setup_self_methods(cluster.TaggedCluster)
setup_self_methods(cluster.TaggedAddress)
setup_iterator_and_proxy_methods(BlockIterator)
setup_iterator_and_proxy_methods(TxIterator)
setup_iterator_and_proxy_methods(OutputIterator)
setup_iterator_and_proxy_methods(InputIterator)
setup_iterator_and_proxy_methods(AddressIterator)
setup_iterator_and_proxy_methods(EquivAddressIterator)
setup_iterator_methods(GenericAddressIterator, proxy.ProxyAddress, PubkeyAddressIterator._self_proxy)
setup_iterator_and_proxy_methods(PubkeyAddressIterator)
setup_iterator_and_proxy_methods(PubkeyHashAddressIterator)
setup_iterator_and_proxy_methods(WitnessPubkeyHashAddressIterator)
setup_iterator_and_proxy_methods(MultisigPubkeyIterator)
setup_iterator_and_proxy_methods(ScriptHashAddressIterator)
setup_iterator_and_proxy_methods(WitnessScriptHashAddressIterator)
setup_iterator_and_proxy_methods(MultisigAddressIterator)
setup_iterator_and_proxy_methods(NonstandardAddressIterator)
setup_iterator_and_proxy_methods(OpReturnIterator)
setup_iterator_and_proxy_methods(cluster.ClusterIterator)
setup_iterator_and_proxy_methods(cluster.TaggedClusterIterator)
setup_iterator_and_proxy_methods(cluster.TaggedAddressIterator)
setup_range_and_proxy_methods(BlockRange)
setup_range_and_proxy_methods(TxRange)
setup_range_and_proxy_methods(OutputRange)
setup_range_and_proxy_methods(InputRange)
setup_range_and_proxy_methods(AddressRange)
setup_range_and_proxy_methods(EquivAddressRange)
setup_range_methods(GenericAddressRange, proxy.ProxyAddress, PubkeyAddressRange._self_proxy)
setup_range_and_proxy_methods(PubkeyAddressRange)
setup_range_and_proxy_methods(PubkeyHashAddressRange)
setup_range_and_proxy_methods(WitnessPubkeyHashAddressRange)
setup_range_and_proxy_methods(MultisigPubkeyRange)
setup_range_and_proxy_methods(ScriptHashAddressRange)
setup_range_and_proxy_methods(WitnessScriptHashAddressRange)
setup_range_and_proxy_methods(MultisigAddressRange)
setup_range_and_proxy_methods(NonstandardAddressRange)
setup_range_and_proxy_methods(OpReturnRange)
setup_range_and_proxy_methods(cluster.ClusterRange)
setup_range_and_proxy_methods(cluster.TaggedClusterRange)
setup_range_and_proxy_methods(cluster.TaggedAddressRange)
def txes_including_output_of_type(txes, typ):
return txes.where(lambda tx: tx.outputs.any(lambda o: o.address_type == typ))
TxIterator.including_output_of_type = txes_including_output_of_type
TxRange.including_output_of_type = txes_including_output_of_type
def inputs_sent_before_height(inputs, height: int) -> InputIterator:
"""Filter the inputs to include only inputs which spent an output created before the given height
"""
return inputs.where(lambda inp: inp.spent_tx.block.height < height)
def inputs_sent_after_height(inputs, height: int) -> InputIterator:
"""Filter the inputs to include only inputs which spent an output created after the given height
"""
return inputs.where(lambda inp: inp.spent_tx.block.height >= height)
def inputs_with_age_less_than(inputs, age: int) -> InputIterator:
"""Filter the inputs to include only inputs with age less than the given value
"""
return inputs.where(lambda inp: inp.tx.block_height - inp.spent_tx.block.height < age)
def inputs_with_age_greater_than(inputs, age: int) -> InputIterator:
"""Filter the inputs to include only inputs with age more than the given value
"""
return inputs.where(lambda inp: inp.tx.block_height - inp.spent_tx.block.height >= age)
def inputs_with_address_type(inputs, typ: address_type) -> InputIterator:
"""Filter the inputs to include only inputs that came from an address with the given type
"""
return inputs.where(lambda inp: inp.address_type == typ)
InputIterator.sent_before_height = inputs_sent_before_height
InputIterator.sent_after_height = inputs_sent_after_height
InputIterator.with_age_less_than = inputs_with_age_less_than
InputIterator.with_age_greater_than = inputs_with_age_greater_than
InputIterator.with_address_type = inputs_with_address_type
InputRange.sent_before_height = inputs_sent_before_height
InputRange.sent_after_height = inputs_sent_after_height
InputRange.with_age_less_than = inputs_with_age_less_than
InputRange.with_age_greater_than = inputs_with_age_greater_than
InputRange.with_address_type = inputs_with_address_type
def _outputAge(output):
return output.spending_tx.map(lambda tx: tx.block_height) - output.tx.block_height
def outputs_unspent(outputs, height = -1):
if height == -1:
return outputs.where(lambda output: ~output.is_spent)
else:
return outputs.where(lambda output: (~output.is_spent) | (output.spending_tx.block_height.or_value(0) > height))
def outputs_spent_before_height(outputs, height):
return outputs.where(lambda output: output.is_spent).where(lambda output: output.spending_tx.map(lambda tx: tx.block_height).or_value(0) < height)
def outputs_spent_after_height(outputs, height):
return outputs.where(lambda output: output.is_spent).where(lambda output: output.spending_tx.map(lambda tx: tx.block_height).or_value(0) >= height)
def outputs_spent_with_age_less_than(outputs, age):
return outputs.where(lambda output: output.is_spent).where(lambda output: _outputAge(output).or_value(0) < age)
def outputs_spent_with_age_greater_than(outputs, age):
return outputs.where(lambda output: output.is_spent).where(lambda output: _outputAge(output).or_value(0) >= age)
def outputs_with_address_type(outputs, typ):
return outputs.where(lambda output: output.address_type == typ)
OutputIterator.unspent = outputs_unspent
OutputIterator.spent_before_height = outputs_spent_before_height
OutputIterator.spent_after_height = outputs_spent_after_height
OutputIterator.spent_with_age_less_than = outputs_spent_with_age_less_than
OutputIterator.outputs_spent_with_age_greater_than = outputs_spent_with_age_greater_than
OutputIterator.with_address_type = outputs_with_address_type
OutputRange.unspent = outputs_unspent
OutputRange.spent_before_height = outputs_spent_before_height
OutputRange.spent_after_height = outputs_spent_after_height
OutputRange.spent_with_age_less_than = outputs_spent_with_age_less_than
OutputRange.outputs_spent_with_age_greater_than = outputs_spent_with_age_greater_than
OutputRange.with_address_type = outputs_with_address_type
def coinjoin_txes(txes):
return txes.where(heuristics.is_coinjoin)
def possible_coinjoin_txes(txes):
return txes.where(heuristics.is_possible_coinjoin)
def address_deanon_txes(txes):
return txes.where(heuristics.is_address_deanon)
def change_over_txes(txes):
return txes.where(heuristics.is_change_over)
def keyset_change_txes(txes):
return txes.where(heuristics.is_keyset_change)
old_power_of_ten_value = heuristics.change.power_of_ten_value
def new_power_of_ten_value(digits, tx=None):
if tx is None:
return old_power_of_ten_value(digits)
else:
return old_power_of_ten_value(digits)(tx)
heuristics.change.power_of_ten_value = new_power_of_ten_value
heuristics.coinjoin_txes = coinjoin_txes
heuristics.possible_coinjoin_txes = possible_coinjoin_txes
heuristics.address_deanon_txes = address_deanon_txes
heuristics.change_over_txes = change_over_txes
heuristics.keyset_change_txes = keyset_change_txes
first_miner_run = True
class DummyClass:
pass
loaderDirectory = os.path.dirname(os.path.abspath(inspect.getsourcefile(DummyClass)))
def get_miner(block) -> str:
"""
Get the miner of the block based on the text in the coinbase transaction
"""
global first_miner_run
global tagged_addresses
global pool_data
global coinbase_tag_re
if first_miner_run:
import json
with open(loaderDirectory + "/Blockchain-Known-Pools/pools.json") as f:
pool_data = json.load(f)
addresses = [block._access.address_from_string(addr_string) for addr_string in pool_data["payout_addresses"]]
tagged_addresses = {pointer: pool_data["payout_addresses"][address] for address in addresses if address in pool_data["payout_addresses"]}
coinbase_tag_re = re.compile('|'.join(map(re.escape, pool_data["coinbase_tags"])))
first_miner_run = False
coinbase = block.coinbase_param.decode("utf_8", "replace")
tag_matches = re.findall(coinbase_tag_re, coinbase)
if tag_matches:
return pool_data["coinbase_tags"][tag_matches[0]]["name"]
for txout in block.coinbase_tx.outs:
if txout.address in tagged_addresses:
return tagged_addresses[txout.address]["name"]
additional_miners = {
"EclipseMC": "EclipseMC",
"poolserverj": "poolserverj",
"/stratumPool/": "stratumPool",
"/stratum/": "stratum",
"/nodeStratum/": "nodeStratum",
"BitLC": "BitLC",
"/TangPool/": "TangPool",
"/Tangpool/": "TangPool",
"pool.mkalinin.ru": "pool.mkalinin.ru",
"For Pierce and Paul": "Pierce and Paul",
"50btc.com": "50btc.com",
"七彩神仙鱼": "F2Pool"
}
for miner in additional_miners:
if miner in coinbase:
return additional_miners[miner]
return "Unknown"
Block.miner = get_miner
| gpl-3.0 |
georgetown-analytics/machine-learning | archive/code/abaloneUtils.py | 5 | 4299 | # utils
# Utility functions for handling data
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Feb 26 17:47:35 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: utils.py [] [email protected] $
"""
Utility functions for handling data
"""
##########################################################################
## Imports
##########################################################################
import os
import csv
import time
import json
import numpy as np
from sklearn.datasets.base import Bunch
##########################################################################
## Module Constants
##########################################################################
SKL_DATA = "SCIKIT_LEARN_DATA"
BASE_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
DATA_DIR = os.path.join(BASE_DIR, "data")
CODE_DIR = os.path.join(BASE_DIR, "code")
##########################################################################
## Helper Functions
##########################################################################
def timeit(func):
"""
Returns how long a function took to execute, along with the output
"""
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
return result, time.time() - start
return timeit
##########################################################################
## Dataset Loading
##########################################################################
def get_data_home(data_home=None):
"""
Returns the path of the data directory
"""
if data_home is None:
data_home = os.environ.get(SKL_DATA, DATA_DIR)
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_data(path, descr=None, target_index=-1):
"""
Returns a scklearn dataset Bunch which includes several important
attributes that are used in modeling:
data: array of shape n_samples * n_features
target: array of length n_samples
feature_names: names of the features
target_names: names of the targets
filenames: names of the files that were loaded
DESCR: contents of the readme
This data therefore has the look and feel of the toy datasets.
Pass in a path usually just the name of the location in the data dir.
It will be joined with the result of `get_data_home`. The contents are:
path
- abalone.names # The file to load into DESCR
- meta.json # A file containing metadata to load
- dataset.txt # The numpy loadtxt file
- dataset.csv # The pandas read_csv file
You can specify another descr, another feature_names, and whether or
not the dataset has a header row. You can also specify the index of the
target, which by default is the last item in the row (-1)
"""
root = os.path.join(get_data_home(), path)
filenames = {
'meta': os.path.join(root, 'meta.json'),
'rdme': os.path.join(root, 'abalone.names'),
'data': os.path.join(root, 'dataset.csv'),
}
target_names = None
feature_names = None
DESCR = None
with open(filenames['meta'], 'r') as f:
meta = json.load(f)
target_names = meta['target_names']
feature_names = meta['feature_names']
with open(filenames['rdme'], 'r') as f:
DESCR = f.read()
# skip header from csv, load data
dataset = np.loadtxt(filenames['data'], delimiter=',', skiprows=1)
data = None
target = None
# Target assumed to be either last or first row
if target_index == -1:
data = dataset[:,0:-1]
target = dataset[:,-1]
elif target_index == 0:
data = dataset[:,1:]
target = dataset[:,0]
else:
raise ValueError("Target index must be either -1 or 0")
return Bunch(data=data,
target=target,
filenames=filenames,
target_names=target_names,
feature_names=feature_names,
DESCR=DESCR)
def load_abalone():
return load_data('abalone')
| mit |
festeh/BuildingMachineLearningSystemsWithPython | ch07/lr10k.py | 24 | 1228 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.datasets import load_svmlight_file
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
# Whether to use Elastic nets (otherwise, ordinary linear regression is used)
# Load data:
data, target = load_svmlight_file('data/E2006.train')
lr = LinearRegression()
# Compute error on training data to demonstrate that we can obtain near perfect
# scores:
lr.fit(data, target)
pred = lr.predict(data)
print('RMSE on training, {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('R2 on training, {:.2}'.format(r2_score(target, pred)))
print('')
pred = np.zeros_like(target)
kf = KFold(len(target), n_folds=5)
for train, test in kf:
lr.fit(data[train], target[train])
pred[test] = lr.predict(data[test])
print('RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/fancybox_demo.py | 9 | 4619 | import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
from matplotlib.patches import FancyBboxPatch
# Bbox object around which the fancy box will be drawn.
bb = mtransforms.Bbox([[0.3, 0.4], [0.7, 0.6]])
def draw_bbox(ax, bb):
# boxstyle=square with pad=0, i.e. bbox itself.
p_bbox = FancyBboxPatch((bb.xmin, bb.ymin),
abs(bb.width), abs(bb.height),
boxstyle="square,pad=0.",
ec="k", fc="none", zorder=10.,
)
ax.add_patch(p_bbox)
def test1(ax):
# a fancy box with round corners. pad=0.1
p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),
abs(bb.width), abs(bb.height),
boxstyle="round,pad=0.1",
fc=(1., .8, 1.),
ec=(1., 0.5, 1.))
ax.add_patch(p_fancy)
ax.text(0.1, 0.8,
r' boxstyle="round,pad=0.1"',
size=10, transform=ax.transAxes)
# draws control points for the fancy box.
#l = p_fancy.get_path().vertices
#ax.plot(l[:,0], l[:,1], ".")
# draw the original bbox in black
draw_bbox(ax, bb)
def test2(ax):
# bbox=round has two optional argument. pad and rounding_size.
# They can be set during the initialization.
p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),
abs(bb.width), abs(bb.height),
boxstyle="round,pad=0.1",
fc=(1., .8, 1.),
ec=(1., 0.5, 1.))
ax.add_patch(p_fancy)
# boxstyle and its argument can be later modified with
# set_boxstyle method. Note that the old attributes are simply
# forgotten even if the boxstyle name is same.
p_fancy.set_boxstyle("round,pad=0.1, rounding_size=0.2")
#or
#p_fancy.set_boxstyle("round", pad=0.1, rounding_size=0.2)
ax.text(0.1, 0.8,
' boxstyle="round,pad=0.1\n rounding\\_size=0.2"',
size=10, transform=ax.transAxes)
# draws control points for the fancy box.
#l = p_fancy.get_path().vertices
#ax.plot(l[:,0], l[:,1], ".")
draw_bbox(ax, bb)
def test3(ax):
# mutation_scale determine overall scale of the mutation,
# i.e. both pad and rounding_size is scaled according to this
# value.
p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),
abs(bb.width), abs(bb.height),
boxstyle="round,pad=0.1",
mutation_scale=2.,
fc=(1., .8, 1.),
ec=(1., 0.5, 1.))
ax.add_patch(p_fancy)
ax.text(0.1, 0.8,
' boxstyle="round,pad=0.1"\n mutation\\_scale=2',
size=10, transform=ax.transAxes)
# draws control points for the fancy box.
#l = p_fancy.get_path().vertices
#ax.plot(l[:,0], l[:,1], ".")
draw_bbox(ax, bb)
def test4(ax):
# When the aspect ratio of the axes is not 1, the fancy box may
# not be what you expected (green)
p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),
abs(bb.width), abs(bb.height),
boxstyle="round,pad=0.2",
fc="none",
ec=(0., .5, 0.), zorder=4)
ax.add_patch(p_fancy)
# You can compensate this by setting the mutation_aspect (pink).
p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),
abs(bb.width), abs(bb.height),
boxstyle="round,pad=0.3",
mutation_aspect=.5,
fc=(1., 0.8, 1.),
ec=(1., 0.5, 1.))
ax.add_patch(p_fancy)
ax.text(0.1, 0.8,
' boxstyle="round,pad=0.3"\n mutation\\_aspect=.5',
size=10, transform=ax.transAxes)
draw_bbox(ax, bb)
def test_all():
plt.clf()
ax = plt.subplot(2, 2, 1)
test1(ax)
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.)
ax.set_title("test1")
ax.set_aspect(1.)
ax = plt.subplot(2, 2, 2)
ax.set_title("test2")
test2(ax)
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.)
ax.set_aspect(1.)
ax = plt.subplot(2, 2, 3)
ax.set_title("test3")
test3(ax)
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.)
ax.set_aspect(1)
ax = plt.subplot(2, 2, 4)
ax.set_title("test4")
test4(ax)
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(0., 1.)
ax.set_aspect(2.)
plt.draw()
plt.show()
test_all()
| mit |
jstraub/rtmf | python/linesearchFocalLength.py | 1 | 4149 | # Copyright (c) 2015, Julian Straub <[email protected]> Licensed
# under the MIT license. See the license file LICENSE.
#import matplotlib.cm as cm
import numpy as np
#import cv2
import scipy.io
import subprocess as subp
import os, re, time, random
import argparse
#from vpCluster.rgbd.rgbdframe import RgbdFrame
#from vpCluster.manifold.sphere import Sphere
#from js.utils.config import Config2String
#from js.utils.plot.pyplot import SaveFigureAsImage
def run(cfg,reRun):
print 'processing '+cfg['dataPath']+cfg['filePath']
print "output to "+cfg['outName']
args = ['../pod-build/bin/realtimeMF',
'--mode '+cfg['mode'],
'-i {}'.format(cfg['dataPath']+cfg['filePath']+"_d.png"),
'-o {}'.format(cfg['outName']),
'-f {}'.format(cfg["f_d"]),
'-B {}'.format(5),
'-T {}'.format(30),
]
if 'dt' in cfg.keys():
args.append('--dt {}'.format(cfg['dt']))
if 'tMax' in cfg.keys():
args.append('--tMax {}'.format(cfg['tMax']))
if 'nCGIter' in cfg.keys():
args.append('--nCGIter {}'.format(cfg['nCGIter']))
print "checking if " + cfg['outName']+"_f.csv"
if reRun or not os.path.isfile(cfg['outName']+"_f.csv"):
print ' '.join(args)
print ' --------------------- '
time.sleep(1)
err = subp.call(' '.join(args),shell=True)
if err:
print 'error when executing'
else:
print "skipping " + cfg['dataPath']+cfg['filePath']
with open(cfg['outName']+"_f.csv","r") as f:
cost = float(f.readline())
print "cost", cost
return cost
# raw_input()
# z = np.loadtxt(cfg['outName']+'.lbl',dtype=int,delimiter=' ')
# sil = np.loadtxt(cfg['outName']+'.lbl_measures.csv',delimiter=" ")
def config2Str(cfg):
use = ['mode','dt','tMax','nCGIter','f_d']
st = use[0]+'_'+str(cfg[use[0]])
for key in use[1::]:
if key in cfg.keys():
st += '-'+key+'_'+str(cfg[key])
return st
parser = argparse.ArgumentParser(description = 'rtmf extraction for NYU')
parser.add_argument('-m','--mode', default='vmfCF',
help='vmf, approx, direct')
args = parser.parse_args()
cfg=dict()
cfg['mode'] = args.mode;
cfg['resultsPath'] = '/data/vision/scratch/fisher/jstraub/rtmf/nyuFocal/'
cfg['dataPath'] = "/data/vision/fisher/data1/nyu_depth_v2/extracted/"
#cfg['resultsPath'] = './'
#cfg['dataPath'] = "../data/"
# for eval of the high quality results of the direct method
cfg['nCGIter'] = 25
cfg['dt'] = 0.05
cfg['tMax'] = 5.0
reRun = False
printCmd = True
N = 11
#fs = np.linspace(380,720,N) # with _300
fs = np.linspace(500,600,N)
print fs
import os.path
if True and os.path.isfile("focalLengthLines_1449.csv"):
error = np.loadtxt("focalLengthLines_1449.csv")
else:
names = []
for root, dirs, files in os.walk(cfg["dataPath"]):
for file in files:
name,ending = os.path.splitext(file)
if ending == '.png' and not re.search("_rgb",name) is None:
names.append(re.sub("_rgb","",name))
break
random.shuffle(names)
#names = names[:100]
#names = names[:300]
error = np.zeros((N,len(names)))
for i,name in enumerate(names):
cfg['filePath'] = name
for j,f in enumerate(fs):
cfg["f_d"] = f
cfg['outName'] = cfg['resultsPath']+cfg['filePath']+'_'+config2Str(cfg)
error[j,i] = run(cfg,reRun)
np.savetxt("focalLengthLines.csv", error)
#for i in range(error.shape[1]):
# plt.plot(fs,np.diff(error[:,i])) #,label=names[i])
f = error
df = np.diff(f,axis=0)
idOk = np.max(df,axis=0) < 400
ddf = np.diff(df,axis=0)
idOk = np.logical_and(idOk, np.min(ddf,axis=0) > 0.)
fsmin = fs[np.argmin(f,axis=0)]
idOk = np.logical_and(idOk, fsmin > 500)
idOk = np.logical_and(idOk, fsmin < 600)
print idOk.shape
print fsmin.shape
plt.figure()
emin = np.min(error[:,idOk],axis=0)
fmin = fs[np.argmin(error[:,idOk],axis=0)]
plt.plot(fmin, emin,'x')
plt.show()
fMean = np.mean(f[:,idOk],axis=1)
print "min: ", np.min(fMean), fs[np.argmin(fMean)]
import matplotlib.pyplot as plt
plt.figure()
plt.plot(fs,fMean)
plt.show()
plt.figure()
for i in range(error.shape[1]):
plt.plot(fs[:-1]+0.5*(fs[1]-fs[0]),np.diff(error[:,i])) #,label=names[i])
plt.legend()
plt.figure()
plt.plot(fs,np.mean(error,axis=1))
plt.show()
| mit |
IndraVikas/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| bsd-3-clause |
dspmeng/code | deeplearning/rnn.py | 1 | 4321 | from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
num_epochs = 100
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
num_classes = 2
echo_step = 3
batch_size = 5
num_batches = total_series_length//batch_size//truncated_backprop_length
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [batch_size, state_size])
W = tf.Variable(np.random.rand(state_size+1, state_size), dtype=tf.float32)
b = tf.Variable(np.zeros((1,state_size)), dtype=tf.float32)
W2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32)
b2 = tf.Variable(np.zeros((1,num_classes)), dtype=tf.float32)
# Unpack columns
inputs_series = tf.unstack(batchX_placeholder, axis=1)
labels_series = tf.unstack(batchY_placeholder, axis=1)
# Forward pass
current_state = init_state
states_series = []
for current_input in inputs_series:
current_input = tf.reshape(current_input, [batch_size, 1])
input_and_state_concatenated = tf.concat([current_input, current_state], 1) # Increasing number of columns
next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition
states_series.append(next_state)
current_state = next_state
print('states_series:', len(states_series), 'of', states_series[0].shape)
logits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
for logits, labels in zip(logits_series,labels_series)]
print('predictions_series:', len(predictions_series), 'of', predictions_series[0].shape)
print('losses:', len(losses), 'of', losses[0].shape)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
def plot(loss_list, predictions_series, batchX, batchY):
plt.subplot(2, 3, 1)
plt.cla()
plt.plot(loss_list)
for batch_series_idx in range(5):
one_hot_output_series = np.array(predictions_series)[:,batch_series_idx,:]
single_output_series = np.array([(1 if out[0] < 0.5 else 0) for out in one_hot_output_series])
plt.subplot(2, 3, batch_series_idx + 2)
plt.cla()
plt.axis([0, truncated_backprop_length, 0, 2])
left_offset = range(truncated_backprop_length)
plt.bar(left_offset, batchX[batch_series_idx, :], width=1, color="blue")
plt.bar(left_offset, batchY[batch_series_idx, :] * 0.5, width=1, color="red")
plt.bar(left_offset, single_output_series * 0.3, width=1, color="green")
plt.draw()
plt.pause(0.0001)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
plt.ion()
plt.figure()
plt.show()
loss_list = []
for epoch_idx in range(num_epochs):
x,y = generateData()
_current_state = np.zeros((batch_size, state_size))
print("New data, epoch", epoch_idx)
for batch_idx in range(num_batches):
start_idx = batch_idx * truncated_backprop_length
end_idx = start_idx + truncated_backprop_length
batchX = x[:,start_idx:end_idx]
batchY = y[:,start_idx:end_idx]
_total_loss, _train_step, _current_state, _predictions_series = sess.run(
[total_loss, train_step, current_state, predictions_series],
feed_dict={
batchX_placeholder:batchX,
batchY_placeholder:batchY,
init_state:_current_state
})
loss_list.append(_total_loss)
if batch_idx%100 == 0:
print("Step",batch_idx, "Loss", _total_loss)
plot(loss_list, _predictions_series, batchX, batchY)
plt.ioff()
plt.show()
| apache-2.0 |
python-visualization/branca | branca/element.py | 1 | 21342 | """
Element
-------
A generic class for creating Elements.
"""
import base64
from html import escape
import json
import warnings
from collections import OrderedDict
from urllib.request import urlopen
from uuid import uuid4
from jinja2 import Environment, PackageLoader, Template
from .utilities import _camelify, _parse_size, none_max, none_min
ENV = Environment(loader=PackageLoader('branca', 'templates'))
class Element(object):
"""Basic Element object that does nothing.
Other Elements may inherit from this one.
Parameters
----------
template : str, default None
A jinaj2-compatible template string for rendering the element.
If None, template will be:
.. code-block:: jinja
{% for name, element in this._children.items() %}
{{element.render(**kwargs)}}
{% endfor %}
so that all the element's children are rendered.
template_name : str, default None
If no template is provided, you can also provide a filename.
"""
_template = Template(
'{% for name, element in this._children.items() %}\n'
' {{element.render(**kwargs)}}'
'{% endfor %}'
)
def __init__(self, template=None, template_name=None):
self._name = 'Element'
self._id = uuid4().hex
self._env = ENV
self._children = OrderedDict()
self._parent = None
if template is not None:
self._template = Template(template)
elif template_name is not None:
self._template = ENV.get_template(template_name)
def get_name(self):
"""Returns a string representation of the object.
This string has to be unique and to be a python and
javascript-compatible
variable name.
"""
return _camelify(self._name) + '_' + self._id
def _get_self_bounds(self):
"""Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]
"""
return [[None, None], [None, None]]
def get_bounds(self):
"""Computes the bounds of the object and all it's children
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
bounds = self._get_self_bounds()
for child in self._children.values():
child_bounds = child.get_bounds()
bounds = [
[
none_min(bounds[0][0], child_bounds[0][0]),
none_min(bounds[0][1], child_bounds[0][1]),
],
[
none_max(bounds[1][0], child_bounds[1][0]),
none_max(bounds[1][1], child_bounds[1][1]),
],
]
return bounds
def add_children(self, child, name=None, index=None):
"""Add a child."""
warnings.warn('Method `add_children` is deprecated. Please use `add_child` instead.',
FutureWarning, stacklevel=2)
return self.add_child(child, name=name, index=index)
def add_child(self, child, name=None, index=None):
"""Add a child."""
if name is None:
name = child.get_name()
if index is None:
self._children[name] = child
else:
items = [item for item in self._children.items()
if item[0] != name]
items.insert(int(index), (name, child))
self._children = OrderedDict(items)
child._parent = self
return self
def add_to(self, parent, name=None, index=None):
"""Add element to a parent."""
parent.add_child(self, name=name, index=index)
return self
def to_dict(self, depth=-1, ordered=True, **kwargs):
"""Returns a dict representation of the object."""
if ordered:
dict_fun = OrderedDict
else:
dict_fun = dict
out = dict_fun()
out['name'] = self._name
out['id'] = self._id
if depth != 0:
out['children'] = dict_fun([(name, child.to_dict(depth=depth-1))
for name, child in self._children.items()]) # noqa
return out
def to_json(self, depth=-1, **kwargs):
"""Returns a JSON representation of the object."""
return json.dumps(self.to_dict(depth=depth, ordered=True), **kwargs)
def get_root(self):
"""Returns the root of the elements tree."""
if self._parent is None:
return self
else:
return self._parent.get_root()
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
return self._template.render(this=self, kwargs=kwargs)
def save(self, outfile, close_file=True, **kwargs):
"""Saves an Element into a file.
Parameters
----------
outfile : str or file object
The file (or filename) where you want to output the html.
close_file : bool, default True
Whether the file has to be closed after write.
"""
if isinstance(outfile, str) or isinstance(outfile, bytes):
fid = open(outfile, 'wb')
else:
fid = outfile
root = self.get_root()
html = root.render(**kwargs)
fid.write(html.encode('utf8'))
if close_file:
fid.close()
class Link(Element):
"""An abstract class for embedding a link in the HTML."""
def get_code(self):
"""Opens the link and returns the response's content."""
if self.code is None:
self.code = urlopen(self.url).read()
return self.code
def to_dict(self, depth=-1, **kwargs):
"""Returns a dict representation of the object."""
out = super(Link, self).to_dict(depth=-1, **kwargs)
out['url'] = self.url
return out
class JavascriptLink(Link):
"""Create a JavascriptLink object based on a url.
Parameters
----------
url : str
The url to be linked
download : bool, default False
Whether the target document shall be loaded right now.
"""
_template = Template(
'{% if kwargs.get("embedded",False) %}'
'<script>{{this.get_code()}}</script>'
'{% else %}'
'<script src="{{this.url}}"></script>'
'{% endif %}'
)
def __init__(self, url, download=False):
super(JavascriptLink, self).__init__()
self._name = 'JavascriptLink'
self.url = url
self.code = None
if download:
self.get_code()
class CssLink(Link):
"""Create a CssLink object based on a url.
Parameters
----------
url : str
The url to be linked
download : bool, default False
Whether the target document shall be loaded right now.
"""
_template = Template(
'{% if kwargs.get("embedded",False) %}'
'<style>{{this.get_code()}}</style>'
'{% else %}'
'<link rel="stylesheet" href="{{this.url}}"/>'
'{% endif %}'
)
def __init__(self, url, download=False):
super(CssLink, self).__init__()
self._name = 'CssLink'
self.url = url
self.code = None
if download:
self.get_code()
class Figure(Element):
"""Create a Figure object, to plot things into it.
Parameters
----------
width : str, default "100%"
The width of the Figure.
It may be a percentage or pixel value (like "300px").
height : str, default None
The height of the Figure.
It may be a percentage or a pixel value (like "300px").
ratio : str, default "60%"
A percentage defining the aspect ratio of the Figure.
It will be ignored if height is not None.
title : str, default None
Figure title.
figsize : tuple of two int, default None
If you're a matplotlib addict, you can overwrite width and
height. Values will be converted into pixels in using 60 dpi.
For example figsize=(10, 5) will result in
width="600px", height="300px".
"""
_template = Template(
'<!DOCTYPE html>\n'
'<head>'
'{% if this.title %}<title>{{this.title}}</title>{% endif %}'
' {{this.header.render(**kwargs)}}\n'
'</head>\n'
'<body>'
' {{this.html.render(**kwargs)}}\n'
'</body>\n'
'<script>'
' {{this.script.render(**kwargs)}}\n'
'</script>\n'
)
def __init__(self, width='100%', height=None, ratio='60%', title=None, figsize=None):
super(Figure, self).__init__()
self._name = 'Figure'
self.header = Element()
self.html = Element()
self.script = Element()
self.header._parent = self
self.html._parent = self
self.script._parent = self
self.width = width
self.height = height
self.ratio = ratio
self.title = title
if figsize is not None:
self.width = str(60*figsize[0])+'px'
self.height = str(60*figsize[1])+'px'
# Create the meta tag.
self.header.add_child(Element(
'<meta http-equiv="content-type" content="text/html; charset=UTF-8" />'), # noqa
name='meta_http')
def to_dict(self, depth=-1, **kwargs):
"""Returns a dict representation of the object."""
out = super(Figure, self).to_dict(depth=depth, **kwargs)
out['header'] = self.header.to_dict(depth=depth-1, **kwargs)
out['html'] = self.html.to_dict(depth=depth-1, **kwargs)
out['script'] = self.script.to_dict(depth=depth-1, **kwargs)
return out
def get_root(self):
"""Returns the root of the elements tree."""
return self
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
for name, child in self._children.items():
child.render(**kwargs)
return self._template.render(this=self, kwargs=kwargs)
def _repr_html_(self, **kwargs):
"""Displays the Figure in a Jupyter notebook."""
html = escape(self.render(**kwargs))
if self.height is None:
iframe = (
'<div style="width:{width};">'
'<div style="position:relative;width:100%;height:0;padding-bottom:{ratio};">' # noqa
'<span style="color:#565656">Make this Notebook Trusted to load map: File -> Trust Notebook</span>' # noqa
'<iframe srcdoc="{html}" style="position:absolute;width:100%;height:100%;left:0;top:0;' # noqa
'border:none !important;" '
'allowfullscreen webkitallowfullscreen mozallowfullscreen>'
'</iframe>'
'</div></div>'
).format(html=html, width=self.width, ratio=self.ratio)
else:
iframe = (
'<iframe srcdoc="{html}" width="{width}" height="{height}"'
'style="border:none !important;" '
'"allowfullscreen" "webkitallowfullscreen" "mozallowfullscreen">'
'</iframe>'
).format(html=html, width=self.width, height=self.height)
return iframe
def add_subplot(self, x, y, n, margin=0.05):
"""Creates a div child subplot in a matplotlib.figure.add_subplot style.
Parameters
----------
x : int
The number of rows in the grid.
y : int
The number of columns in the grid.
n : int
The cell number in the grid, counted from 1 to x*y.
Example:
>>> fig.add_subplot(3,2,5)
# Create a div in the 5th cell of a 3rows x 2columns
grid(bottom-left corner).
"""
width = 1./y
height = 1./x
left = ((n-1) % y)*width
top = ((n-1)//y)*height
left = left+width*margin
top = top+height*margin
width = width*(1-2.*margin)
height = height*(1-2.*margin)
div = Div(position='absolute',
width='{}%'.format(100.*width),
height='{}%'.format(100.*height),
left='{}%'.format(100.*left),
top='{}%'.format(100.*top),
)
self.add_child(div)
return div
class Html(Element):
"""Create an HTML div object for embedding data.
Parameters
----------
data : str
The HTML data to be embedded.
script : bool
If True, data will be embedded without escaping
(suitable for embedding html-ready code)
width : int or str, default '100%'
The width of the output div element.
Ex: 120 , '80%'
height : int or str, default '100%'
The height of the output div element.
Ex: 120 , '80%'
"""
_template = Template(
'<div id="{{this.get_name()}}" '
'style="width: {{this.width[0]}}{{this.width[1]}}; height: {{this.height[0]}}{{this.height[1]}};">' # noqa
'{% if this.script %}{{this.data}}{% else %}{{this.data|e}}{% endif %}</div>'
) # noqa
def __init__(self, data, script=False, width='100%', height='100%'):
super(Html, self).__init__()
self._name = 'Html'
self.script = script
self.data = data
self.width = _parse_size(width)
self.height = _parse_size(height)
class Div(Figure):
"""Create a Div to be embedded in a Figure.
Parameters
----------
width: int or str, default '100%'
The width of the div in pixels (int) or percentage (str).
height: int or str, default '100%'
The height of the div in pixels (int) or percentage (str).
left: int or str, default '0%'
The left-position of the div in pixels (int) or percentage (str).
top: int or str, default '0%'
The top-position of the div in pixels (int) or percentage (str).
position: str, default 'relative'
The position policy of the div.
Usual values are 'relative', 'absolute', 'fixed', 'static'.
"""
_template = Template(
'{% macro header(this, kwargs) %}'
'<style> #{{this.get_name()}} {\n'
' position : {{this.position}};\n'
' width : {{this.width[0]}}{{this.width[1]}};\n'
' height: {{this.height[0]}}{{this.height[1]}};\n'
' left: {{this.left[0]}}{{this.left[1]}};\n'
' top: {{this.top[0]}}{{this.top[1]}};\n'
' </style>'
'{% endmacro %}'
'{% macro html(this, kwargs) %}'
'<div id="{{this.get_name()}}">{{this.html.render(**kwargs)}}</div>'
'{% endmacro %}'
)
def __init__(self, width='100%', height='100%',
left='0%', top='0%', position='relative'):
super(Figure, self).__init__()
self._name = 'Div'
# Size Parameters.
self.width = _parse_size(width)
self.height = _parse_size(height)
self.left = _parse_size(left)
self.top = _parse_size(top)
self.position = position
self.header = Element()
self.html = Element(
'{% for name, element in this._children.items() %}'
'{{element.render(**kwargs)}}'
'{% endfor %}'
)
self.script = Element()
self.header._parent = self
self.html._parent = self
self.script._parent = self
def get_root(self):
"""Returns the root of the elements tree."""
return self
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
figure = self._parent
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
for name, element in self._children.items():
element.render(**kwargs)
for name, element in self.header._children.items():
figure.header.add_child(element, name=name)
for name, element in self.script._children.items():
figure.script.add_child(element, name=name)
header = self._template.module.__dict__.get('header', None)
if header is not None:
figure.header.add_child(Element(header(self, kwargs)),
name=self.get_name())
html = self._template.module.__dict__.get('html', None)
if html is not None:
figure.html.add_child(Element(html(self, kwargs)),
name=self.get_name())
script = self._template.module.__dict__.get('script', None)
if script is not None:
figure.script.add_child(Element(script(self, kwargs)),
name=self.get_name())
def _repr_html_(self, **kwargs):
"""Displays the Div in a Jupyter notebook."""
if self._parent is None:
self.add_to(Figure())
out = self._parent._repr_html_(**kwargs)
self._parent = None
else:
out = self._parent._repr_html_(**kwargs)
return out
class IFrame(Element):
"""Create a Figure object, to plot things into it.
Parameters
----------
html : str, default None
Eventual HTML code that you want to put in the frame.
width : str, default "100%"
The width of the Figure.
It may be a percentage or pixel value (like "300px").
height : str, default None
The height of the Figure.
It may be a percentage or a pixel value (like "300px").
ratio : str, default "60%"
A percentage defining the aspect ratio of the Figure.
It will be ignored if height is not None.
figsize : tuple of two int, default None
If you're a matplotlib addict, you can overwrite width and
height. Values will be converted into pixels in using 60 dpi.
For example figsize=(10, 5) will result in
width="600px", height="300px".
"""
def __init__(self, html=None, width='100%', height=None, ratio='60%',
figsize=None):
super(IFrame, self).__init__()
self._name = 'IFrame'
self.width = width
self.height = height
self.ratio = ratio
if figsize is not None:
self.width = str(60*figsize[0])+'px'
self.height = str(60*figsize[1])+'px'
if isinstance(html, str) or isinstance(html, bytes):
self.add_child(Element(html))
elif html is not None:
self.add_child(html)
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
html = super(IFrame, self).render(**kwargs)
html = "data:text/html;charset=utf-8;base64," + base64.b64encode(html.encode('utf8')).decode('utf8') # noqa
if self.height is None:
iframe = (
'<div style="width:{width};">'
'<div style="position:relative;width:100%;height:0;padding-bottom:{ratio};">' # noqa
'<iframe src="{html}" style="position:absolute;width:100%;height:100%;left:0;top:0;' # noqa
'border:none !important;">'
'</iframe>'
'</div></div>'
).format(html=html, width=self.width, ratio=self.ratio)
else:
iframe = (
'<iframe src="{html}" width="{width}" style="border:none !important;" '
'height="{height}"></iframe>'
).format(html=html, width=self.width, height=self.height)
return iframe
class MacroElement(Element):
"""This is a parent class for Elements defined by a macro template.
To compute your own element, all you have to do is:
* To inherit from this class
* Overwrite the '_name' attribute
* Overwrite the '_template' attribute with something of the form::
{% macro header(this, kwargs) %}
...
{% endmacro %}
{% macro html(this, kwargs) %}
...
{% endmacro %}
{% macro script(this, kwargs) %}
...
{% endmacro %}
"""
_template = Template(u'')
def __init__(self):
super(MacroElement, self).__init__()
self._name = 'MacroElement'
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
header = self._template.module.__dict__.get('header', None)
if header is not None:
figure.header.add_child(Element(header(self, kwargs)),
name=self.get_name())
html = self._template.module.__dict__.get('html', None)
if html is not None:
figure.html.add_child(Element(html(self, kwargs)),
name=self.get_name())
script = self._template.module.__dict__.get('script', None)
if script is not None:
figure.script.add_child(Element(script(self, kwargs)),
name=self.get_name())
for name, element in self._children.items():
element.render(**kwargs)
| mit |
kgori/treeCl | treeCl/utils/silhouette.py | 1 | 5872 | import numpy as np
import pandas as pd
from ..partition import Partition
class Silhouette(object):
def __init__(self, dm):
self._pvec = None
self.distances = dm
self.groups = None
self.neighbours = None
self.scores = None
@staticmethod
def __get_indices_for_groups_by_index(ix, jx):
if len(ix) == len(jx) == 1 and ix == jx:
return [list(ix)], [list(jx)]
row_indices = [[i for j in jx if i != j] for i in ix]
column_indices = [[j for j in jx if j != i] for i in ix]
return row_indices, column_indices
@staticmethod
def __silhouette_calc(ingroup, outgroup):
if len(ingroup) == 1:
return 0
max_ = np.array([ingroup, outgroup]).max(axis=0)
return (outgroup - ingroup) / max_
def get_indices_for_group(self, group):
return np.where(self.pvec == group)[0]
def get_indices_for_groups(self, group1, group2):
ix = np.where(self.pvec == group1)[0]
jx = np.where(self.pvec == group2)[0]
return self.__get_indices_for_groups_by_index(ix, jx)
def get_mean_dissimilarities_for_group(self, group):
outgroups = self.groups[self.groups != group]
within_indices = self.get_indices_for_groups(group, group)
within_distances = self.distances[within_indices].mean(axis=1)
dissimilarities = []
for outgroup in outgroups:
between_indices = self.get_indices_for_groups(group, outgroup)
between_distances = self.distances[between_indices]
dissimilarities.append(between_distances.mean(axis=1))
return within_distances, np.array(dissimilarities), outgroups
def run(self):
if len(self.groups) == 1:
raise ValueError("Silhouette is not defined for singleton clusters")
for ingroup in self.groups:
ingroup_ix = self.get_indices_for_group(ingroup)
within, between, outgroups = self.get_mean_dissimilarities_for_group(ingroup)
between_min = between.min(axis=0)
outgroup_ix, neighbours_ix = np.where(between == between_min)
neighbours = np.zeros(neighbours_ix.shape)
neighbours[neighbours_ix] = outgroups[outgroup_ix]
self.neighbours[ingroup_ix] = neighbours
self.scores[ingroup_ix] = self.__silhouette_calc(within, between_min)
@property
def pvec(self):
return self._pvec
@pvec.setter
def pvec(self, partition):
if isinstance(partition, Partition):
self._pvec = np.array(partition.partition_vector)
else:
self._pvec = np.array(partition)
self.groups = np.unique(self._pvec)
self.neighbours = np.zeros(self._pvec.shape)
self.scores = np.zeros(self._pvec.shape)
def __call__(self, partition):
self.pvec = partition
self.run()
return self.neighbours, self.scores
def add_silhouettes_to_dataframe(path_to_distances, path_to_table, **kwargs):
table = pd.read_csv(path_to_table, **kwargs)
dm = np.loadtxt(path_to_distances)
if __name__ == '__main__':
dm = np.array(
[[0., 0.352, 0.23, 0.713, 0.426, 0.653, 0.481, 0.554, 1.533, 1.549, 1.505, 1.46],
[0.352, 0., 0.249, 0.772, 0.625, 0.909, 0.668, 0.725, 1.613, 1.623, 1.568, 1.523],
[0.23, 0.249, 0., 0.811, 0.417, 0.751, 0.456, 0.52, 1.489, 1.501, 1.446, 1.396],
[0.713, 0.772, 0.811, 0., 0.962, 0.894, 1.025, 1.068, 1.748, 1.782, 1.724, 1.72],
[0.426, 0.625, 0.417, 0.962, 0., 0.644, 0.083, 0.216, 1.424, 1.439, 1.398, 1.339],
[0.653, 0.909, 0.751, 0.894, 0.644, 0., 0.685, 0.659, 1.467, 1.502, 1.448, 1.416],
[0.481, 0.668, 0.456, 1.025, 0.083, 0.685, 0., 0.203, 1.419, 1.432, 1.394, 1.331],
[0.554, 0.725, 0.52, 1.068, 0.216, 0.659, 0.203, 0., 1.503, 1.53, 1.472, 1.416],
[1.533, 1.613, 1.489, 1.748, 1.424, 1.467, 1.419, 1.503, 0., 0.288, 0.299, 0.262],
[1.549, 1.623, 1.501, 1.782, 1.439, 1.502, 1.432, 1.53, 0.288, 0., 0.296, 0.185],
[1.505, 1.568, 1.446, 1.724, 1.398, 1.448, 1.394, 1.472, 0.299, 0.296, 0., 0.197],
[1.46, 1.523, 1.396, 1.72, 1.339, 1.416, 1.331, 1.416, 0.262, 0.185, 0.197, 0.]])
plist = [Partition((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)),
Partition((1, 2, 3, 4, 5, 6, 5, 7, 8, 9, 10, 11)),
Partition((1, 2, 3, 4, 5, 6, 5, 7, 8, 9, 10, 9)),
Partition((1, 2, 1, 3, 4, 5, 4, 6, 7, 8, 9, 8)),
Partition((1, 2, 1, 3, 4, 5, 4, 4, 6, 7, 8, 7)),
Partition((1, 2, 1, 3, 4, 5, 4, 4, 6, 7, 7, 7)),
Partition((1, 2, 1, 3, 4, 5, 4, 4, 6, 6, 6, 6)),
Partition((1, 1, 1, 2, 3, 4, 3, 3, 5, 5, 5, 5)),
Partition((1, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 4)),
Partition((1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3)),
Partition((1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2)),
Partition((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))]
s = Silhouette(dm)
skips = 0
for p in plist:
try:
neighbours, scores = s(p)
print("{} clusters: avg score = {}".format(len(p), scores.mean()))
except ValueError:
print("{} clusters: skipping".format(len(p)))
skips += 1
print ("{} tests, {} skipped".format(len(plist), skips))
import treeCl
cl = treeCl.Clustering(dm)
skips = 0
for p in plist:
try:
anosim = cl.anosim(p)
except ValueError:
skips += 1
continue
try:
permanova = cl.permanova(p)
except ValueError:
skips += 1
continue
print ("{} clusters: anosim = {}; permanova = {}".format(len(p), anosim.p_value, permanova.p_value))
print ("{} tests, {} skipped".format(2*len(plist), skips))
| mit |
samuel1208/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/build/lib.linux-x86_64-2.7/klustaviewa/control/processor.py | 2 | 8264 | """The Controller offers high-level methods to change the data."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import inspect
import numpy as np
import pandas as pd
from kwiklib.utils import logger as log
from kwiklib.dataio.selection import get_indices, select
from kwiklib.dataio.tools import get_array
from kwiklib.utils.colors import random_color
# -----------------------------------------------------------------------------
# Processor
# -----------------------------------------------------------------------------
class Processor(object):
"""Implement actions.
An Action object is:
(method_name, args, kwargs)
"""
def __init__(self, loader):
self.loader = loader
# Actions.
# --------
# Merge.
def merge_clusters(self, clusters_old, cluster_groups, cluster_colors,
cluster_merged):
# Get spikes in clusters to merge.
# spikes = self.loader.get_spikes(clusters=clusters_to_merge)
spikes = get_indices(clusters_old)
clusters_to_merge = get_indices(cluster_groups)
group = np.max(get_array(cluster_groups))
# color_old = get_array(cluster_colors)[0]
color_new = random_color()
self.loader.add_cluster(cluster_merged, group, color_new)
# Set the new cluster to the corresponding spikes.
self.loader.set_cluster(spikes, cluster_merged)
# Remove old clusters.
for cluster in clusters_to_merge:
self.loader.remove_cluster(cluster)
self.loader.unselect()
return dict(clusters_to_merge=clusters_to_merge,
cluster_merged=cluster_merged,
cluster_merged_colors=(color_new, color_new),)
def merge_clusters_undo(self, clusters_old, cluster_groups,
cluster_colors, cluster_merged):
# Get spikes in clusters to merge.
spikes = self.loader.get_spikes(clusters=cluster_merged)
clusters_to_merge = get_indices(cluster_groups)
# Add old clusters.
for cluster, group, color in zip(
clusters_to_merge, cluster_groups, cluster_colors):
self.loader.add_cluster(cluster, group, color)
# Set the new clusters to the corresponding spikes.
self.loader.set_cluster(spikes, clusters_old)
# Remove merged cluster.
self.loader.remove_cluster(cluster_merged)
self.loader.unselect()
color_old = self.loader.get_cluster_color(clusters_to_merge[0])
color_old2 = self.loader.get_cluster_color(clusters_to_merge[1])
return dict(clusters_to_merge=clusters_to_merge,
cluster_merged=cluster_merged,
cluster_to_merge_colors=(color_old, color_old2),
)
# Split.
def split_clusters(self, clusters, clusters_old, cluster_groups,
cluster_colors, clusters_new):
if not hasattr(clusters, '__len__'):
clusters = [clusters]
spikes = get_indices(clusters_old)
# Find groups and colors of old clusters.
cluster_indices_old = np.unique(clusters_old)
cluster_indices_new = np.unique(clusters_new)
# Get group and color of the new clusters, from the old clusters.
groups = self.loader.get_cluster_groups(cluster_indices_old)
# colors = self.loader.get_cluster_colors(cluster_indices_old)
# Add clusters.
self.loader.add_clusters(cluster_indices_new,
# HACK: take the group of the first cluster for all new clusters
get_array(groups)[0]*np.ones(len(cluster_indices_new)),
random_color(len(cluster_indices_new)))
# Set the new clusters to the corresponding spikes.
self.loader.set_cluster(spikes, clusters_new)
# Remove empty clusters.
clusters_empty = self.loader.remove_empty_clusters()
self.loader.unselect()
clusters_to_select = sorted(set(cluster_indices_old).union(
set(cluster_indices_new)) - set(clusters_empty))
return dict(clusters_to_split=clusters,
clusters_split=get_array(cluster_indices_new),
clusters_empty=clusters_empty)
def split_clusters_undo(self, clusters, clusters_old, cluster_groups,
cluster_colors, clusters_new):
if not hasattr(clusters, '__len__'):
clusters = [clusters]
spikes = get_indices(clusters_old)
# Find groups and colors of old clusters.
cluster_indices_old = np.unique(clusters_old)
cluster_indices_new = np.unique(clusters_new)
# Add clusters that were removed after the split operation.
clusters_empty = sorted(set(cluster_indices_old) -
set(cluster_indices_new))
self.loader.add_clusters(
clusters_empty,
select(cluster_groups, clusters_empty),
select(cluster_colors, clusters_empty))
# Set the new clusters to the corresponding spikes.
self.loader.set_cluster(spikes, clusters_old)
# Remove empty clusters.
clusters_empty = self.loader.remove_empty_clusters()
self.loader.unselect()
return dict(clusters_to_split=clusters,
clusters_split=get_array(cluster_indices_new),
# clusters_empty=clusters_empty
)
# Change cluster color.
def change_cluster_color(self, cluster, color_old, color_new,
clusters_selected):
self.loader.set_cluster_colors(cluster, color_new)
return dict(clusters=clusters_selected, cluster=cluster,
color_old=color_old, color_new=color_new)
def change_cluster_color_undo(self, cluster, color_old, color_new,
clusters_selected):
self.loader.set_cluster_colors(cluster, color_old)
return dict(clusters=clusters_selected, cluster=cluster,
color_old=color_old, color_new=color_new)
# Move clusters.
def move_clusters(self, clusters, groups_old, group_new):
# Get next cluster to select.
next_cluster = self.loader.get_next_cluster(clusters[-1])
self.loader.set_cluster_groups(clusters, group_new)
# to_compute=[] to force refreshing the correlation matrix
# return dict(to_select=[next_cluster], to_compute=[])
return dict(clusters=clusters, groups_old=groups_old, group=group_new,
next_cluster=next_cluster)
def move_clusters_undo(self, clusters, groups_old, group_new):
self.loader.set_cluster_groups(clusters, groups_old)
# to_compute=[] to force refreshing the correlation matrix
# return dict(to_select=clusters, to_compute=[])
return dict(clusters=clusters, groups_old=groups_old, group=group_new)
# Change group color.
def change_group_color(self, group, color_old, color_new):
self.loader.set_group_colors(group, color_new)
return dict(groups=[group])
def change_group_color_undo(self, group, color_old, color_new):
self.loader.set_group_colors(group, color_old)
return dict(groups=[group])
# Add group.
def add_group(self, group, name, color):
self.loader.add_group(group, name, color)
def add_group_undo(self, group, name, color):
self.loader.remove_group(group)
# Rename group.
def rename_group(self, group, name_old, name_new):
self.loader.set_group_names(group, name_new)
def rename_group_undo(self, group, name_old, name_new):
self.loader.set_group_names(group, name_old)
# Remove group.
def remove_group(self, group, name, color):
self.loader.remove_group(group)
def remove_group_undo(self, group, name, color):
self.loader.add_group(group, name, color)
| gpl-3.0 |
maniteja123/scipy | scipy/interpolate/ndgriddata.py | 39 | 7457 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tesselate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| bsd-3-clause |
cpmech/CIVL4250py | Workspace/Poisson2D/hotplate.py | 1 | 1507 | import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
from tb_grid2dV2 import Grid2D
# input data
Nx, Ny = 5, 4
lx, ly = 5.0, 4.0
kx, ky = 1.0, 1.0
# grid
g = Grid2D(Nx, Ny, lx, ly, 0.0, 0.0)
# auxiliary variables
dxx, dyy = g.dx**2., g.dy**2. # sq increments
alp = 2.*(kx/dxx+ky/dyy) # alpha
bet, gam = -kx/dxx, -ky/dyy # beta, gamma
mol = [alp, bet, bet, gam, gam] # molecule
# assemble K matrix
N = Nx * Ny
K = np.zeros((N,N)) # K matrix
F = np.zeros(N) # RHS array
for n in range(N): # for each eq
i, j = n%Nx, n/Nx # col and rows
I = [n, n-1, n+1, n-g.nx, n+g.nx] # nodes
if i==0: I[1] = I[2] # left bry
if i==Nx-1: I[2] = I[1] # right bry
if j==0: I[3] = I[4] # bottom bry
if j==Ny-1: I[4] = I[3] # top bry
for p, k in enumerate(I): # each contrib
K[n,k] += mol[p] # set K matrix
F[n] = 0.0
# prescribed values
pn = np.hstack([g.L, g.R, g.B, g.T])
U = np.zeros(N)
U[g.B] = 0.0
U[g.R] = 0.0
U[g.T] = 50.0
U[g.L] = 50.0
# solution
eqs = np.arange(N)
eq2 = eqs[pn]
eq1 = np.delete(eqs,eq2)
K1_ = K [eq1,:]
K11 = K1_[:,eq1]
K12 = K1_[:,eq2]
U[eq1] = la.solve(K11, F[eq1] - np.dot(K12, U[eq2]))
# post-processing
Umat = np.zeros((Nx, Ny))
for n in range(N):
i, j = n%Nx, n/Nx
Umat[i,j] = U[n]
cf1 = plt.contourf(g.X, g.Y, Umat, cmap=plt.get_cmap('bwr'))
cf2 = plt.contour(g.X, g.Y, Umat, colors='black')
plt.colorbar(cf1)
plt.clabel(cf2, inline=True)
plt.show() | mit |
thermokarst/q2-taxa | q2_taxa/tests/test_methods.py | 1 | 4442 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
from q2_taxa import collapse
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
| bsd-3-clause |
abhishekgahlot/scikit-learn | sklearn/cross_decomposition/cca_.py | 18 | 3129 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data?
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
PatrickChrist/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
abergeron/pylearn2 | pylearn2/cross_validation/tests/test_subset_iterators.py | 49 | 2411 | """
Test subset iterators.
"""
import numpy as np
from pylearn2.testing.skip import skip_if_no_sklearn
def test_validation_k_fold():
"""Test ValidationKFold."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import ValidationKFold
n = 30
# test with indices
cv = ValidationKFold(n)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n / cv.n_folds
assert test.size == n / cv.n_folds
def test_stratified_validation_k_fold():
"""Test StratifiedValidationKFold."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
StratifiedValidationKFold)
n = 30
y = np.concatenate((np.zeros(n / 2, dtype=int), np.ones(n / 2, dtype=int)))
# test with indices
cv = StratifiedValidationKFold(y)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n / cv.n_folds
assert test.size == n / cv.n_folds
assert np.count_nonzero(y[valid]) == (n / 2) * (1. / cv.n_folds)
assert np.count_nonzero(y[test]) == (n / 2) * (1. / cv.n_folds)
def test_validation_shuffle_split():
"""Test ValidationShuffleSplit."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
ValidationShuffleSplit)
n = 30
# test with indices
cv = ValidationShuffleSplit(n)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n * cv.test_size
assert test.size == n * cv.test_size
def test_stratified_validation_shuffle_split():
"""Test StratifiedValidationShuffleSplit."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
StratifiedValidationShuffleSplit)
n = 60
y = np.concatenate((np.zeros(n / 2, dtype=int), np.ones(n / 2, dtype=int)))
# test with indices
cv = StratifiedValidationShuffleSplit(y)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n * cv.test_size
assert test.size == n * cv.test_size
assert np.count_nonzero(y[valid]) == (n / 2) * cv.test_size
assert np.count_nonzero(y[test]) == (n / 2) * cv.test_size
| bsd-3-clause |
nat13ejo/NUMA12 | homework_6/code/hwktask5.py | 1 | 1274 | import numpy as np
from matplotlib import pyplot as plt
# data definition
X = [0, 1, 2, 3, 4, 5, 6];
f = [-35, -56, 0, -16, -3, 4, 10];
w = np.ones(7)
#sets Z and complements of Z
Z = [];
# Construction of all possible Z.
for i in range(len(X)):
for j in range(i+1, len(X)):
Z.append([i, j]);
all_errors = [];
all_approx = []
for k in range(len(Z)):
i, j = Z[k];
#construct approximation
p = [(f[j] * X[i] - f[i] * X[j]) / (X[i] - X[j]),
(f[i] - f[j]) / float(X[i] - X[j])]
# Calculate error
error = 0;
for l in range(len(X)):
error += w[l] * np.abs(f[l] - (p[1] * X[l] + p[0]) );
all_errors.append(error);
all_approx.append(p);
best_approx_index = np.argmin(np.array(all_errors))
p_star = all_approx[best_approx_index]
approx = []
for x in X:
approx.append(p_star[1] * x + p_star[0])
print("data: " + str(f))
print("approx: " + str(approx))
print("polynom: " + str(p_star[1]) + "*x + "
+ str(p_star[0]))
approx = []p
grid = np.linspace(X[0], X[-1], 100)
for x in grid:
approx.append(p_star[1] * x + p_star[0])
plt.plot(approx, grid);
plt.plot(f, X, "*");
plt.legend(["$p^*(x)$ = " + str(p_star[1]) + "x " + str(p_star[0]), "data points"])
plt.savefig("task_5.png")
| gpl-3.0 |
eladnoor/small-molecule-regulation | oldcode/map_regulators_to_SMRN.py | 1 | 1742 | # Map activators and inhibitors to ecoli metabolic model
# 1. NB: For now, we are using all possible mappings of EC numbers,
# and many of them map to multiple reactions.
# May need to think about how to deal with this.
# 2. NB: If there is activation and inhibition, we have to deal with this carefully
# 3. NB: There are multiple BRENDA entries indicating the same reaction. We should treat this as multiple lines of supporting evidence.
import os
import pandas as pd
import settings
#%% Read in the data for activators and inhibitors from BRENDA
act = pd.read_csv(os.path.join(settings.CACHE_DIR, 'ecoli_activating_bigg.csv'),
header=0, index_col=0)
act['Value'] = 1
inh = pd.read_csv(os.path.join(settings.CACHE_DIR, 'ecoli_ki_bigg.csv'),
header=0, index_col=0)
inh['Value'] = -1
effectors = pd.concat([act, inh])
effectors.drop('Commentary', 1, inplace=True)
#%% load BiGG reaction to EC number mapping
bigg2ec = pd.DataFrame.from_csv(os.path.join(settings.CACHE_DIR, 'bigg2ec.csv'))
# Read BIGG model
model, S = settings.get_ecoli_json()
# Set to lower case
mnames = map(unicode.lower, S.index)
rnames = map(unicode.lower, S.columns)
#%% merge BRENDA effector data with the bigg2ec table to get the BiGG reaction IDs
bigg_effectors = pd.merge(effectors, bigg2ec, how='inner', on='EC_number')
# Write to file
bigg_effectors.to_csv(os.path.join(settings.CACHE_DIR, 'iJO1366_SMRN.csv'))
#%% group by the reaction and metabolite IDs and write to another CSV file
bigg_effectors_grouped = bigg_effectors.groupby(['bigg.reaction', 'bigg.metabolite']).sum().reset_index()
bigg_effectors_grouped.to_csv(os.path.join(settings.CACHE_DIR, 'iJO1366_SMRN_grouped.csv'))
| mit |
navaro1/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
plt.hist(values, np.linspace(*hist_range, num=len(values)/2))
plt.show()
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/io/parquet.py | 2 | 41179 | from __future__ import absolute_import, division, print_function
import re
import copy
import json
import os
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from ..core import DataFrame, Series
from ..utils import (clear_known_categories, strip_unknown_categories,
UNKNOWN_CATEGORIES)
from ...bytes.compression import compress
from ...base import tokenize
from ...compatibility import PY3, string_types
from ...delayed import delayed
from ...bytes.core import get_fs_token_paths
from ...bytes.utils import infer_storage_options
from ...utils import import_required, natural_sort_key
from .utils import _get_pyarrow_dtypes, _meta_from_dtypes
__all__ = ('read_parquet', 'to_parquet')
def _parse_pandas_metadata(pandas_metadata):
"""Get the set of names from the pandas metadata section
Parameters
----------
pandas_metadata : dict
Should conform to the pandas parquet metadata spec
Returns
-------
index_names : list
List of strings indicating the actual index names
column_names : list
List of strings indicating the actual column names
storage_name_mapping : dict
Pairs of storage names (e.g. the field names for
PyArrow) and actual names. The storage and field names will
differ for index names for certain writers (pyarrow > 0.8).
column_indexes_names : list
The names for ``df.columns.name`` or ``df.columns.names`` for
a MultiIndex in the columns
Notes
-----
This should support metadata written by at least
* fastparquet>=0.1.3
* pyarrow>=0.7.0
"""
index_storage_names = pandas_metadata['index_columns']
index_name_xpr = re.compile('__index_level_\d+__')
# older metadatas will not have a 'field_name' field so we fall back
# to the 'name' field
pairs = [(x.get('field_name', x['name']), x['name'])
for x in pandas_metadata['columns']]
# Need to reconcile storage and real names. These will differ for
# pyarrow, which uses __index_leveL_d__ for the storage name of indexes.
# The real name may be None (e.g. `df.index.name` is None).
pairs2 = []
for storage_name, real_name in pairs:
if real_name and index_name_xpr.match(real_name):
real_name = None
pairs2.append((storage_name, real_name))
index_names = [name for (storage_name, name) in pairs2
if name != storage_name]
# column_indexes represents df.columns.name
# It was added to the spec after pandas 0.21.0+, and implemented
# in PyArrow 0.8. It's not currently impelmented in fastparquet.
column_index_names = pandas_metadata.get("column_indexes", [{'name': None}])
column_index_names = [x['name'] for x in column_index_names]
# Now we need to disambiguate between columns and index names. PyArrow
# 0.8.0+ allows for duplicates between df.index.names and df.columns
if not index_names:
# For PyArrow < 0.8, Any fastparquet. This relies on the facts that
# 1. Those versions used the real index name as the index storage name
# 2. Those versions did not allow for duplicate index / column names
# So we know that if a name is in index_storage_names, it must be an
# index name
index_names = list(index_storage_names) # make a copy
index_storage_names2 = set(index_storage_names)
column_names = [name for (storage_name, name)
in pairs if name not in index_storage_names2]
else:
# For newer PyArrows the storage names differ from the index names
# iff it's an index level. Though this is a fragile assumption for
# other systems...
column_names = [name for (storage_name, name) in pairs2
if name == storage_name]
storage_name_mapping = dict(pairs2) # TODO: handle duplicates gracefully
return index_names, column_names, storage_name_mapping, column_index_names
def _normalize_index_columns(user_columns, data_columns, user_index, data_index):
"""Normalize user and file-provided column and index names
Parameters
----------
user_columns : None, str or list of str
data_columns : list of str
user_index : None, str, or list of str
data_index : list of str
Returns
-------
column_names : list of str
index_names : list of str
out_type : {pd.Series, pd.DataFrame}
"""
specified_columns = user_columns is not None
specified_index = user_index is not None
out_type = DataFrame
if user_columns is None:
user_columns = list(data_columns)
elif isinstance(user_columns, string_types):
user_columns = [user_columns]
out_type = Series
else:
user_columns = list(user_columns)
if user_index is None:
user_index = data_index
elif user_index is False:
# When index is False, use no index and all fields should be treated as
# columns (unless `columns` provided).
user_index = []
data_columns = data_index + data_columns
elif isinstance(user_index, string_types):
user_index = [user_index]
else:
user_index = list(user_index)
if specified_index and not specified_columns:
# Only `index` provided. Use specified index, and all column fields
# that weren't specified as indices
index_names = user_index
column_names = [x for x in data_columns if x not in index_names]
elif specified_columns and not specified_index:
# Only `columns` provided. Use specified columns, and all index fields
# that weren't specified as columns
column_names = user_columns
index_names = [x for x in data_index if x not in column_names]
elif specified_index and specified_columns:
# Both `index` and `columns` provided. Use as specified, but error if
# they intersect.
column_names = user_columns
index_names = user_index
if set(column_names).intersection(index_names):
raise ValueError("Specified index and column names must not "
"intersect")
else:
# Use default columns and index from the metadata
column_names = data_columns
index_names = data_index
return column_names, index_names, out_type
# ----------------------------------------------------------------------
# Fastparquet interface
def _read_fastparquet(fs, fs_token, paths, columns=None, filters=None,
categories=None, index=None, infer_divisions=None):
import fastparquet
from fastparquet.util import check_column_names
if isinstance(paths,fastparquet.api.ParquetFile):
pf = paths
elif len(paths) > 1:
pf = fastparquet.ParquetFile(paths, open_with=fs.open, sep=fs.sep)
else:
try:
pf = fastparquet.ParquetFile(paths[0] + fs.sep + '_metadata',
open_with=fs.open,
sep=fs.sep)
except Exception:
pf = fastparquet.ParquetFile(paths[0], open_with=fs.open, sep=fs.sep)
# Validate infer_divisions
if os.path.split(pf.fn)[-1] != '_metadata' and infer_divisions is True:
raise NotImplementedError("infer_divisions=True is not supported by the fastparquet engine for datasets "
"that do not contain a global '_metadata' file")
check_column_names(pf.columns, categories)
if isinstance(columns, tuple):
# ensure they tokenize the same
columns = list(columns)
if pf.fmd.key_value_metadata:
pandas_md = [x.value for x in pf.fmd.key_value_metadata if x.key == 'pandas']
else:
pandas_md = []
if len(pandas_md) == 0:
# Fall back to the storage information
index_names = pf._get_index()
if not isinstance(index_names, list):
index_names = [index_names]
column_names = pf.columns + list(pf.cats)
storage_name_mapping = {k: k for k in column_names}
elif len(pandas_md) == 1:
index_names, column_names, storage_name_mapping, column_index_names = (
_parse_pandas_metadata(json.loads(pandas_md[0]))
)
column_names.extend(pf.cats)
else:
raise ValueError("File has multiple entries for 'pandas' metadata")
# Normalize user inputs
if filters is None:
filters = []
column_names, index_names, out_type = _normalize_index_columns(
columns, column_names, index, index_names)
if categories is None:
categories = pf.categories
elif isinstance(categories, string_types):
categories = [categories]
else:
categories = list(categories)
# TODO: write partition_on to pandas metadata...
all_columns = list(column_names)
all_columns.extend(x for x in index_names if x not in column_names)
rgs = [rg for rg in pf.row_groups if
not (fastparquet.api.filter_out_stats(rg, filters, pf.schema)) and
not (fastparquet.api.filter_out_cats(rg, filters))]
dtypes = pf._dtypes(categories)
dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}
meta = _meta_from_dtypes(all_columns, dtypes, index_names, [None])
# fastparquet doesn't handle multiindex
if len(index_names) > 1:
raise ValueError("Cannot read DataFrame with MultiIndex.")
elif len(index_names) == 0:
index_names = None
for cat in categories:
if cat in meta:
meta[cat] = pd.Series(pd.Categorical([],
categories=[UNKNOWN_CATEGORIES]),
index=meta.index)
for catcol in pf.cats:
if catcol in meta.columns:
meta[catcol] = meta[catcol].cat.set_categories(pf.cats[catcol])
elif meta.index.name == catcol:
meta.index = meta.index.set_categories(pf.cats[catcol])
if out_type == Series:
assert len(meta.columns) == 1
meta = meta[meta.columns[0]]
name = 'read-parquet-' + tokenize(fs_token, paths, all_columns, filters,
categories)
dsk = {(name, i): (_read_parquet_row_group, fs, pf.row_group_filename(rg),
index_names, all_columns, rg, out_type == Series,
categories, pf.schema, pf.cats, pf.dtypes,
pf.file_scheme, storage_name_mapping)
for i, rg in enumerate(rgs)}
if not dsk:
# empty dataframe
dsk = {(name, 0): meta}
divisions = (None, None)
return out_type(dsk, name, meta, divisions)
if index_names and infer_divisions is not False:
index_name = meta.index.name
try:
# is https://github.com/dask/fastparquet/pull/371 available in
# current fastparquet installation?
minmax = fastparquet.api.sorted_partitioned_columns(pf, filters)
except TypeError:
minmax = fastparquet.api.sorted_partitioned_columns(pf)
if index_name in minmax:
divisions = minmax[index_name]
divisions = divisions['min'] + [divisions['max'][-1]]
else:
if infer_divisions is True:
raise ValueError(
("Unable to infer divisions for index of '{index_name}' because it is not known to be "
"sorted across partitions").format(index_name=index_name))
divisions = (None,) * (len(rgs) + 1)
else:
if infer_divisions is True:
raise ValueError(
'Unable to infer divisions for because no index column was discovered')
divisions = (None,) * (len(rgs) + 1)
if isinstance(divisions[0], np.datetime64):
divisions = [pd.Timestamp(d) for d in divisions]
return out_type(dsk, name, meta, divisions)
def _read_parquet_row_group(fs, fn, index, columns, rg, series, categories,
schema, cs, dt, scheme, storage_name_mapping, *args):
from fastparquet.api import _pre_allocate
from fastparquet.core import read_row_group_file
from collections import OrderedDict
name_storage_mapping = {v: k for k, v in storage_name_mapping.items()}
if not isinstance(columns, (tuple, list)):
columns = [columns,]
series = True
if index:
index, = index
if index not in columns:
columns = columns + [index]
columns = [name_storage_mapping.get(col, col) for col in columns]
index = name_storage_mapping.get(index, index)
cs = OrderedDict([(k, v) for k, v in cs.items() if k in columns])
df, views = _pre_allocate(rg.num_rows, columns, categories, index, cs, dt)
read_row_group_file(fn, rg, columns, categories, schema, cs,
open=fs.open, assign=views, scheme=scheme)
if df.index.nlevels == 1:
if index:
df.index.name = storage_name_mapping.get(index, index)
else:
if index:
df.index.names = [storage_name_mapping.get(name, name)
for name in index]
df.columns = [storage_name_mapping.get(col, col)
for col in columns
if col != index]
if series:
return df[df.columns[0]]
else:
return df
def _write_partition_fastparquet(df, fs, path, filename, fmd, compression,
partition_on):
from fastparquet.writer import partition_on_columns, make_part_file
import fastparquet
# Fastparquet mutates this in a non-threadsafe manner. For now we just copy
# it before forwarding to fastparquet.
fmd = copy.copy(fmd)
if not len(df):
# Write nothing for empty partitions
rgs = None
elif partition_on:
if LooseVersion(fastparquet.__version__) >= '0.1.4':
rgs = partition_on_columns(df, partition_on, path, filename, fmd,
compression, fs.open, fs.mkdirs)
else:
rgs = partition_on_columns(df, partition_on, path, filename, fmd,
fs.sep, compression, fs.open, fs.mkdirs)
else:
# Fastparquet current doesn't properly set `num_rows` in the output
# metadata. Set it here to fix that.
fmd.num_rows = len(df)
with fs.open(fs.sep.join([path, filename]), 'wb') as fil:
rgs = make_part_file(fil, df, fmd.schema, compression=compression,
fmd=fmd)
return rgs
def _write_fastparquet(df, fs, fs_token, path, write_index=None, append=False,
ignore_divisions=False, partition_on=None,
compression=None, **kwargs):
import fastparquet
fs.mkdirs(path)
sep = fs.sep
object_encoding = kwargs.pop('object_encoding', 'utf8')
if object_encoding == 'infer' or (isinstance(object_encoding, dict) and 'infer' in object_encoding.values()):
raise ValueError('"infer" not allowed as object encoding, '
'because this required data in memory.')
divisions = df.divisions
if write_index is True or write_index is None and df.known_divisions:
df = df.reset_index()
index_cols = [df.columns[0]]
else:
ignore_divisions = True
index_cols = []
if append:
try:
pf = fastparquet.api.ParquetFile(path, open_with=fs.open, sep=sep)
except (IOError, ValueError):
# append for create
append = False
if append:
if pf.file_scheme not in ['hive', 'empty', 'flat']:
raise ValueError('Requested file scheme is hive, '
'but existing file scheme is not.')
elif ((set(pf.columns) != set(df.columns) - set(partition_on)) or (set(partition_on) != set(pf.cats))):
raise ValueError('Appended columns not the same.\n'
'New: {} | Previous: {}'
.format(pf.columns, list(df.columns)))
elif set(pf.dtypes[c] for c in pf.columns) != set(df[pf.columns].dtypes):
raise ValueError('Appended dtypes differ.\n{}'
.format(set(pf.dtypes.items()) ^
set(df.dtypes.iteritems())))
else:
df = df[pf.columns + partition_on]
fmd = pf.fmd
i_offset = fastparquet.writer.find_max_part(fmd.row_groups)
if not ignore_divisions:
minmax = fastparquet.api.sorted_partitioned_columns(pf)
old_end = minmax[index_cols[0]]['max'][-1]
if divisions[0] < old_end:
raise ValueError(
'Appended divisions overlapping with the previous ones.\n'
'New: {} | Previous: {}'.format(old_end, divisions[0]))
else:
fmd = fastparquet.writer.make_metadata(df._meta,
object_encoding=object_encoding,
index_cols=index_cols,
ignore_columns=partition_on,
**kwargs)
i_offset = 0
filenames = ['part.%i.parquet' % (i + i_offset)
for i in range(df.npartitions)]
write = delayed(_write_partition_fastparquet, pure=False)
writes = [write(part, fs, path, filename, fmd, compression, partition_on)
for filename, part in zip(filenames, df.to_delayed())]
return delayed(_write_metadata)(writes, filenames, fmd, path, fs, sep)
def _write_metadata(writes, filenames, fmd, path, fs, sep):
""" Write Parquet metadata after writing all row groups
See Also
--------
to_parquet
"""
import fastparquet
fmd = copy.copy(fmd)
for fn, rg in zip(filenames, writes):
if rg is not None:
if isinstance(rg, list):
for r in rg:
fmd.row_groups.append(r)
else:
for chunk in rg.columns:
chunk.file_path = fn
fmd.row_groups.append(rg)
fn = sep.join([path, '_metadata'])
fastparquet.writer.write_common_metadata(fn, fmd, open_with=fs.open,
no_row_groups=False)
fn = sep.join([path, '_common_metadata'])
fastparquet.writer.write_common_metadata(fn, fmd, open_with=fs.open)
# ----------------------------------------------------------------------
# PyArrow interface
def _read_pyarrow(fs, fs_token, paths, columns=None, filters=None,
categories=None, index=None, infer_divisions=None):
from ...bytes.core import get_pyarrow_filesystem
import pyarrow.parquet as pq
# In pyarrow, the physical storage field names may differ from
# the actual dataframe names. This is true for Index names when
# PyArrow >= 0.8.
# We would like to resolve these to the correct dataframe names
# as soon as possible.
if filters is not None:
raise NotImplementedError("Predicate pushdown not implemented")
if isinstance(categories, string_types):
categories = [categories]
elif categories is None:
categories = []
else:
categories = list(categories)
if isinstance(columns, tuple):
columns = list(columns)
dataset = pq.ParquetDataset(paths, filesystem=get_pyarrow_filesystem(fs))
if dataset.partitions is not None:
partitions = [n for n in dataset.partitions.partition_names
if n is not None]
else:
partitions = []
schema = dataset.schema.to_arrow_schema()
has_pandas_metadata = schema.metadata is not None and b'pandas' in schema.metadata
if has_pandas_metadata:
pandas_metadata = json.loads(schema.metadata[b'pandas'].decode('utf8'))
index_names, column_names, storage_name_mapping, column_index_names = (
_parse_pandas_metadata(pandas_metadata)
)
else:
index_names = []
column_names = schema.names
storage_name_mapping = {k: k for k in column_names}
column_index_names = [None]
column_names += [p for p in partitions if p not in column_names]
column_names, index_names, out_type = _normalize_index_columns(
columns, column_names, index, index_names)
all_columns = index_names + column_names
# Find non-empty pieces
non_empty_pieces = []
# Determine valid pieces
_open = lambda fn: pq.ParquetFile(fs.open(fn, mode='rb'))
for piece in dataset.pieces:
pf = piece.get_metadata(_open)
# non_empty_pieces.append(piece)
if pf.num_row_groups > 0:
non_empty_pieces.append(piece)
# Sort pieces naturally
# If a single input path resulted in multiple dataset pieces, then sort
# the pieces naturally. If multiple paths were supplied then we leave
# the order of the resulting pieces unmodified
if len(paths) == 1 and len(dataset.pieces) > 1:
non_empty_pieces = sorted(
non_empty_pieces, key=lambda piece: natural_sort_key(piece.path))
# Determine divisions
if len(index_names) == 1:
# Look up storage name of the single index column
divisions_names = [storage_name for storage_name, name
in storage_name_mapping.items()
if index_names[0] == name]
if divisions_names:
divisions_name = divisions_names[0]
else:
divisions_name = None
else:
divisions_name = None
divisions = _get_pyarrow_divisions(non_empty_pieces, divisions_name,
schema, infer_divisions)
# Build task
dtypes = _get_pyarrow_dtypes(schema, categories)
dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}
meta = _meta_from_dtypes(all_columns, dtypes, index_names,
column_index_names)
meta = clear_known_categories(meta, cols=categories)
if out_type == Series:
assert len(meta.columns) == 1
meta = meta[meta.columns[0]]
task_name = 'read-parquet-' + tokenize(fs_token, paths, all_columns)
if non_empty_pieces:
task_plan = {
(task_name, i): (_read_pyarrow_parquet_piece,
fs,
piece,
column_names,
index_names,
out_type == Series,
dataset.partitions,
categories)
for i, piece in enumerate(non_empty_pieces)
}
else:
meta = strip_unknown_categories(meta)
task_plan = {(task_name, 0): meta}
return out_type(task_plan, task_name, meta, divisions)
def _to_ns(val, unit):
"""
Convert an input time in the specified units to nanoseconds
Parameters
----------
val: int
Input time value
unit : str
Time units of `val`.
One of 's', 'ms', 'us', 'ns'
Returns
-------
int
Time val in nanoseconds
"""
factors = {'s': int(1e9), 'ms': int(1e6), 'us': int(1e3), 'ns': 1}
try:
factor = factors.get(unit)
except KeyError:
raise ValueError("Unsupported time unit '{unit}'".format(unit=unit))
return val * factor
def _get_pyarrow_divisions(pa_pieces, divisions_name, pa_schema, infer_divisions):
"""
Compute DataFrame divisions from a list of pyarrow dataset pieces
Parameters
----------
pa_pieces : list[pyarrow.parquet.ParquetDatasetPiece]
List of dataset pieces. Each piece corresponds to a single partition in the eventual dask DataFrame
divisions_name : str|None
The name of the column to compute divisions for
pa_schema : pyarrow.lib.Schema
The pyarrow schema for the dataset
infer_divisions : bool or None
If True divisions must be inferred (otherwise an exception is raised). If False or None divisions are not
inferred
Returns
-------
list
"""
# Local imports
import pyarrow as pa
import pyarrow.parquet as pq
if infer_divisions is True and pa.__version__ < LooseVersion('0.9.0'):
raise NotImplementedError('infer_divisions=True requires pyarrow >=0.9.0')
# Check whether divisions_name is in the schema
# Note: get_field_index returns -1 if not found, but it does not accept None
if infer_divisions is True:
divisions_name_in_schema = divisions_name is not None and pa_schema.get_field_index(divisions_name) >= 0
if divisions_name_in_schema is False and infer_divisions is True:
raise ValueError(
'Unable to infer divisions for because no index column was discovered')
else:
divisions_name_in_schema = None
if pa_pieces and divisions_name_in_schema:
# We have pieces and a valid division column.
# Compute min/max for column in each row group
min_maxs = []
last_max = None
# Initialize index of divisions column within the row groups.
# To be computed during while processing the first piece below
for piece in pa_pieces:
pf = piece.get_metadata(pq.ParquetFile)
rg = pf.row_group(0)
# Compute division column index
rg_paths = [rg.column(i).path_in_schema for i in range(rg.num_columns)]
try:
divisions_col_index = rg_paths.index(divisions_name)
except ValueError:
# Divisions not valid
min_maxs = None
break
col_meta = rg.column(divisions_col_index)
stats = col_meta.statistics
if stats.has_min_max and (last_max is None or last_max < stats.min):
min_maxs.append((stats.min, stats.max))
last_max = stats.max
else:
# Divisions not valid
min_maxs = None
break
if min_maxs:
# We have min/max pairs
divisions = [mn for mn, mx in min_maxs] + [min_maxs[-1][1]]
# Handle conversion to pandas timestamp divisions
index_field = pa_schema.field_by_name(divisions_name)
if pa.types.is_timestamp(index_field.type):
time_unit = index_field.type.unit
divisions_ns = [_to_ns(d, time_unit) for d in
divisions]
divisions = [pd.Timestamp(ns) for ns in divisions_ns]
# Handle encoding of bytes string
if index_field.type == pa.string():
# Parquet strings are always encoded as utf-8
encoding = 'utf-8'
divisions = [d.decode(encoding).strip() for d in divisions]
else:
if infer_divisions is True:
raise ValueError(
("Unable to infer divisions for index of '{index_name}' because it is not known to be "
"sorted across partitions").format(index_name=divisions_name_in_schema))
divisions = (None,) * (len(pa_pieces) + 1)
elif pa_pieces:
divisions = (None,) * (len(pa_pieces) + 1)
else:
divisions = (None, None)
return divisions
def _read_pyarrow_parquet_piece(fs, piece, columns, index_cols, is_series,
partitions, categories):
import pyarrow as pa
with fs.open(piece.path, mode='rb') as f:
table = piece.read(columns=index_cols + columns,
partitions=partitions,
use_pandas_metadata=True,
file=f)
if pa.__version__ < LooseVersion('0.9.0'):
df = table.to_pandas()
for cat in categories:
df[cat] = df[cat].astype('category')
else:
df = table.to_pandas(categories=categories)
has_index = not isinstance(df.index, pd.RangeIndex)
if not has_index and index_cols:
# Index should be set, but it isn't
df = df.set_index(index_cols)
elif has_index and df.index.names != index_cols:
# Index is set, but isn't correct
# This can happen when reading in not every column in a multi-index
df = df.reset_index(drop=False)
if index_cols:
df = df.set_index(index_cols)
drop = list(set(df.columns).difference(columns))
if drop:
df = df.drop(drop, axis=1)
# Ensure proper ordering
df = df.reindex(columns=columns, copy=False)
if is_series:
return df[df.columns[0]]
else:
return df[columns]
_pyarrow_write_table_kwargs = {'row_group_size', 'version', 'use_dictionary',
'compression', 'use_deprecated_int96_timestamps',
'coerce_timestamps', 'flavor', 'chunk_size'}
_pyarrow_write_metadata_kwargs = {'version', 'use_deprecated_int96_timestamps',
'coerce_timestamps'}
def _write_pyarrow(df, fs, fs_token, path, write_index=None, append=False,
ignore_divisions=False, partition_on=None, **kwargs):
if append:
raise NotImplementedError("`append` not implemented for "
"`engine='pyarrow'`")
if ignore_divisions:
raise NotImplementedError("`ignore_divisions` not implemented for "
"`engine='pyarrow'`")
# We can check only write_table kwargs, as it is a superset of kwargs for write functions
if set(kwargs).difference(_pyarrow_write_table_kwargs):
msg = ("Unexpected keyword arguments: " +
"%r" % list(set(kwargs).difference(_pyarrow_write_table_kwargs)))
raise TypeError(msg)
if write_index is None and df.known_divisions:
write_index = True
fs.mkdirs(path)
template = fs.sep.join([path, 'part.%i.parquet'])
write = delayed(_write_partition_pyarrow, pure=False)
first_kwargs = kwargs.copy()
first_kwargs['metadata_path'] = fs.sep.join([path, '_common_metadata'])
writes = [write(part, path, fs, template % i, write_index, partition_on,
**(kwargs if i else first_kwargs))
for i, part in enumerate(df.to_delayed())]
return delayed(writes)
def _write_partition_pyarrow(df, path, fs, filename, write_index,
partition_on, metadata_path=None, **kwargs):
import pyarrow as pa
from pyarrow import parquet
t = pa.Table.from_pandas(df, preserve_index=write_index)
if partition_on:
parquet.write_to_dataset(t, path, partition_cols=partition_on,
preserve_index=write_index,
filesystem=fs, **kwargs)
else:
with fs.open(filename, 'wb') as fil:
parquet.write_table(t, fil, **kwargs)
if metadata_path is not None:
with fs.open(metadata_path, 'wb') as fil:
# Get only arguments specified in the function
kwargs_meta = {k: v for k, v in kwargs.items()
if k in _pyarrow_write_metadata_kwargs}
parquet.write_metadata(t.schema, fil, **kwargs_meta)
# ----------------------------------------------------------------------
# User API
_ENGINES = {}
def get_engine(engine):
"""Get the parquet engine backend implementation.
Parameters
----------
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet reader library to use. Default is first installed in this list.
Returns
-------
A dict containing a ``'read'`` and ``'write'`` function.
"""
if engine in _ENGINES:
return _ENGINES[engine]
if engine == 'auto':
for eng in ['fastparquet', 'pyarrow']:
try:
return get_engine(eng)
except RuntimeError:
pass
else:
raise RuntimeError("Please install either fastparquet or pyarrow")
elif engine == 'fastparquet':
import_required('fastparquet', "`fastparquet` not installed")
_ENGINES['fastparquet'] = eng = {'read': _read_fastparquet,
'write': _write_fastparquet}
return eng
elif engine == 'pyarrow':
pa = import_required('pyarrow', "`pyarrow` not installed")
if LooseVersion(pa.__version__) < '0.8.0':
raise RuntimeError("PyArrow version >= 0.8.0 required")
_ENGINES['pyarrow'] = eng = {'read': _read_pyarrow,
'write': _write_pyarrow}
return eng
else:
raise ValueError('Unsupported engine: "{0}".'.format(engine) +
' Valid choices include "pyarrow" and "fastparquet".')
def read_parquet(path, columns=None, filters=None, categories=None, index=None,
storage_options=None, engine='auto', infer_divisions=None):
"""
Read ParquetFile into a Dask DataFrame
This reads a directory of Parquet data into a Dask.dataframe, one file per
partition. It selects the index among the sorted columns if any exist.
Parameters
----------
path : string, list or fastparquet.ParquetFile
Source directory for data, or path(s) to individual parquet files.
Prefix with a protocol like ``s3://`` to read from alternative
filesystems. To read from multiple files you can pass a globstring or a
list of paths, with the caveat that they must all have the same
protocol.
Alternatively, also accepts a previously opened
fastparquet.ParquetFile()
columns : string, list or None (default)
Field name(s) to read in as columns in the output. By default all
non-index fields will be read (as determined by the pandas parquet
metadata, if present). Provide a single field name instead of a list to
read in the data as a Series.
filters : list
List of filters to apply, like ``[('x', '>', 0), ...]``. This implements
row-group (partition) -level filtering only, i.e., to prevent the
loading of some chunks of the data, and only if relevant statistics
have been included in the metadata.
index : string, list, False or None (default)
Field name(s) to use as the output frame index. By default will be
inferred from the pandas parquet file metadata (if present). Use False
to read all fields as columns.
categories : list, dict or None
For any fields listed here, if the parquet encoding is Dictionary,
the column will be created with dtype category. Use only if it is
guaranteed that the column is encoded as dictionary in all row-groups.
If a list, assumes up to 2**16-1 labels; if a dict, specify the number
of labels expected; if None, will load categories automatically for
data written by dask/fastparquet, not otherwise.
storage_options : dict
Key/value pairs to be passed on to the file-system backend, if any.
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet reader library to use. If only one library is installed, it
will use that one; if both, it will use 'fastparquet'
infer_divisions : bool or None (default).
By default, divisions are inferred if the read `engine` supports
doing so efficiently and the `index` of the underlying dataset is
sorted across the individual parquet files. Set to ``True`` to
force divisions to be inferred in all cases. Note that this may
require reading metadata from each file in the dataset, which may
be expensive. Set to ``False`` to never infer divisions.
Examples
--------
>>> df = dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP
See Also
--------
to_parquet
"""
is_ParquetFile = False
try:
import fastparquet
if isinstance(path, fastparquet.api.ParquetFile):
if path.open != fastparquet.util.default_open:
assert (re.match('.*://', path.fn)), \
("ParquetFile: Path must contain protocol" +
" (e.g., s3://...) when using other than the default" +
" LocalFileSystem. Path given: " + path.fn)
assert (engine in ['auto', 'fastparquet']), \
("'engine' should be set to 'auto' or 'fastparquet' " +
'when reading from fastparquet.ParquetFile')
is_ParquetFile = True
except ImportError:
pass
if is_ParquetFile:
read = get_engine('fastparquet')['read']
if path.fn.endswith('_metadata'):
# remove '_metadata' from path
urlpath = path.fn[:-len('_metadata')]
else:
urlpath = path.fn
fs, fs_token, paths = get_fs_token_paths(
urlpath,
mode='rb',
storage_options=storage_options
)
else:
read = get_engine(engine)['read']
fs, fs_token, paths = get_fs_token_paths(
path, mode='rb',
storage_options=storage_options
)
if isinstance(path, string_types) and len(paths) > 1:
# Sort paths naturally if multiple paths resulted from a single
# specification (by '*' globbing)
paths = sorted(paths, key=natural_sort_key)
return read(fs, fs_token, paths, columns=columns, filters=filters,
categories=categories, index=index, infer_divisions=infer_divisions)
def to_parquet(df, path, engine='auto', compression='default', write_index=None,
append=False, ignore_divisions=False, partition_on=None,
storage_options=None, compute=True, **kwargs):
"""Store Dask.dataframe to Parquet files
Notes
-----
Each partition will be written to a separate file.
Parameters
----------
df : dask.dataframe.DataFrame
path : string
Destination directory for data. Prepend with protocol like ``s3://``
or ``hdfs://`` for remote data.
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet library to use. If only one library is installed, it will use
that one; if both, it will use 'fastparquet'.
compression : string or dict, optional
Either a string like ``"snappy"`` or a dictionary mapping column names
to compressors like ``{"name": "gzip", "values": "snappy"}``. The
default is ``"default"``, which uses the default compression for
whichever engine is selected.
write_index : boolean, optional
Whether or not to write the index. Defaults to True *if* divisions are
known.
append : bool, optional
If False (default), construct data-set from scratch. If True, add new
row-group(s) to an existing data-set. In the latter case, the data-set
must exist, and the schema must match the input data.
ignore_divisions : bool, optional
If False (default) raises error when previous divisions overlap with
the new appended divisions. Ignored if append=False.
partition_on : list, optional
Construct directory-based partitioning by splitting on these fields'
values. Each dask partition will result in one or more datafiles,
there will be no global groupby.
storage_options : dict, optional
Key/value pairs to be passed on to the file-system backend, if any.
compute : bool, optional
If True (default) then the result is computed immediately. If False
then a ``dask.delayed`` object is returned for future computation.
**kwargs
Extra options to be passed on to the specific backend.
Examples
--------
>>> df = dd.read_csv(...) # doctest: +SKIP
>>> to_parquet('/path/to/output/', df, compression='snappy') # doctest: +SKIP
See Also
--------
read_parquet: Read parquet data to dask.dataframe
"""
partition_on = partition_on or []
if set(partition_on) - set(df.columns):
raise ValueError('Partitioning on non-existent column')
if compression != 'default':
kwargs['compression'] = compression
elif 'snappy' in compress:
kwargs['compression'] = 'snappy'
write = get_engine(engine)['write']
fs, fs_token, _ = get_fs_token_paths(path, mode='wb',
storage_options=storage_options)
# Trim any protocol information from the path before forwarding
path = infer_storage_options(path)['path']
out = write(df, fs, fs_token, path, write_index=write_index, append=append,
ignore_divisions=ignore_divisions, partition_on=partition_on,
**kwargs)
if compute:
out.compute()
return None
return out
if PY3:
DataFrame.to_parquet.__doc__ = to_parquet.__doc__
| gpl-3.0 |
has2k1/plotnine | plotnine/themes/seaborn_rcmod.py | 1 | 14980 | """Functions that alter the matplotlib rc dictionary on the fly."""
import matplotlib as _mpl
import functools
# https://github.com/mwaskom/seaborn/seaborn/rcmod.py
# commit: d19fff8
#
# Modifications
# ---------------
# modified set()
# removed set_palette(), reset_defaults(), reset_orig()
# set mpl_ge_150, mpl_ge_2 for MPL > 3
#
# We (plotnine) do not want to modify the rcParams
# on the matplotlib instance, so we create a dummy object
# The set_* function work on the rcParams dict on that
# object and then set() returns it. Then outside this
# file we only need to call the set() function.
class dummy:
pass
mpl = dummy()
mpl.__version__ = _mpl.__version__
mpl_ge_150 = False
mpl_ge_2 = True
_style_keys = [
"axes.facecolor",
"axes.edgecolor",
"axes.grid",
"axes.axisbelow",
"axes.labelcolor",
"figure.facecolor",
"grid.color",
"grid.linestyle",
"text.color",
"xtick.color",
"ytick.color",
"xtick.direction",
"ytick.direction",
"lines.solid_capstyle",
"patch.edgecolor",
"image.cmap",
"font.family",
"font.sans-serif",
]
if mpl_ge_2:
_style_keys.extend([
"patch.force_edgecolor",
"xtick.bottom",
"xtick.top",
"ytick.left",
"ytick.right",
"axes.spines.left",
"axes.spines.bottom",
"axes.spines.right",
"axes.spines.top",
])
_context_keys = [
"font.size",
"axes.labelsize",
"axes.titlesize",
"xtick.labelsize",
"ytick.labelsize",
"legend.fontsize",
"axes.linewidth",
"grid.linewidth",
"lines.linewidth",
"lines.markersize",
"patch.linewidth",
"xtick.major.width",
"ytick.major.width",
"xtick.minor.width",
"ytick.minor.width",
"xtick.major.size",
"ytick.major.size",
"xtick.minor.size",
"ytick.minor.size",
]
def set(context="notebook", style="darkgrid", palette="deep",
font="sans-serif", font_scale=1, color_codes=False, rc=None):
"""Set aesthetic parameters in one step.
Each set of parameters can be set directly or temporarily, see the
referenced functions below for more information.
Parameters
----------
context : string or dict
Plotting context parameters, see :func:`plotting_context`
style : string or dict
Axes style parameters, see :func:`axes_style`
palette : string or sequence
Color palette, see :func:`color_palette`
font : string
Font family, see matplotlib font manager.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
rc : dict or None
Dictionary of rc parameter mappings to override the above.
"""
mpl.rcParams = {}
set_context(context, font_scale)
set_style(style, rc={"font.family": font})
if rc is not None:
mpl.rcParams.update(rc)
return mpl.rcParams
def axes_style(style=None, rc=None):
"""Return a parameter dict for the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
This function returns an object that can be used in a ``with`` statement
to temporarily change the style parameters.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> st = axes_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
>>> import matplotlib.pyplot as plt
>>> with axes_style("white"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_style : set the matplotlib parameters for a seaborn theme
plotting_context : return a parameter dict to to scale plot elements
color_palette : define the color palette for a plot
"""
if style is None:
style_dict = {k: mpl.rcParams[k] for k in _style_keys}
elif isinstance(style, dict):
style_dict = style
else:
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
if style not in styles:
raise ValueError("style must be one of %s" % ", ".join(styles))
# Define colors here
dark_gray = ".15"
light_gray = ".8"
# Common parameters
style_dict = {
"figure.facecolor": "white",
"axes.labelcolor": dark_gray,
"xtick.direction": "out",
"ytick.direction": "out",
"xtick.color": dark_gray,
"ytick.color": dark_gray,
"axes.axisbelow": True,
"grid.linestyle": "-",
"text.color": dark_gray,
"font.family": ["sans-serif"],
"font.sans-serif": ["Arial", "DejaVu Sans", "Liberation Sans",
"Bitstream Vera Sans", "sans-serif"],
"lines.solid_capstyle": "round",
"patch.edgecolor": "w",
"patch.force_edgecolor": True,
"image.cmap": "Greys",
"xtick.top": False,
"ytick.right": False,
}
# Set grid on or off
if "grid" in style:
style_dict.update({
"axes.grid": True,
})
else:
style_dict.update({
"axes.grid": False,
})
# Set the color of the background, spines, and grids
if style.startswith("dark"):
style_dict.update({
"axes.facecolor": "#EAEAF2",
"axes.edgecolor": "white",
"grid.color": "white",
"axes.spines.left": True,
"axes.spines.bottom": True,
"axes.spines.right": True,
"axes.spines.top": True,
})
elif style == "whitegrid":
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": light_gray,
"grid.color": light_gray,
"axes.spines.left": True,
"axes.spines.bottom": True,
"axes.spines.right": True,
"axes.spines.top": True,
})
elif style in ["white", "ticks"]:
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": dark_gray,
"grid.color": light_gray,
"axes.spines.left": True,
"axes.spines.bottom": True,
"axes.spines.right": True,
"axes.spines.top": True,
})
# Show or hide the axes ticks
if style == "ticks":
style_dict.update({
"xtick.bottom": True,
"ytick.left": True,
})
else:
style_dict.update({
"xtick.bottom": False,
"ytick.left": False,
})
# Remove entries that are not defined in the base list of valid keys
# This lets us handle matplotlib <=/> 2.0
style_dict = {k: v for k, v in style_dict.items() if k in _style_keys}
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _style_keys}
style_dict.update(rc)
# Wrap in an _AxesStyle object so this can be used in a with statement
style_object = _AxesStyle(style_dict)
return style_object
def set_style(style=None, rc=None):
"""Set the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> set_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
See Also
--------
axes_style : return a dict of parameters or use in a ``with`` statement
to temporarily set the style.
set_context : set parameters to scale plot elements
set_palette : set the default color palette for figures
"""
style_object = axes_style(style, rc)
mpl.rcParams.update(style_object)
def plotting_context(context=None, font_scale=1, rc=None):
"""Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
which are version of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
"""
if context is None:
context_dict = {k: mpl.rcParams[k] for k in _context_keys}
elif isinstance(context, dict):
context_dict = context
else:
contexts = ["paper", "notebook", "talk", "poster"]
if context not in contexts:
raise ValueError("context must be in %s" % ", ".join(contexts))
# Set up dictionary of default parameters
base_context = {
"font.size": 12,
"axes.labelsize": 12,
"axes.titlesize": 12,
"xtick.labelsize": 11,
"ytick.labelsize": 11,
"legend.fontsize": 11,
"axes.linewidth": 1.25,
"grid.linewidth": 1,
"lines.linewidth": 1.5,
"lines.markersize": 6,
"patch.linewidth": 1,
"xtick.major.width": 1.25,
"ytick.major.width": 1.25,
"xtick.minor.width": 1,
"ytick.minor.width": 1,
"xtick.major.size": 6,
"ytick.major.size": 6,
"xtick.minor.size": 4,
"ytick.minor.size": 4,
}
# Scale all the parameters by the same factor depending on the context
scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context]
context_dict = {k: v * scaling for k, v in base_context.items()}
# Now independently scale the fonts
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
font_dict = {k: context_dict[k] * font_scale for k in font_keys}
context_dict.update(font_dict)
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _context_keys}
context_dict.update(rc)
# Wrap in a _PlottingContext object so this can be used in a with statement
context_object = _PlottingContext(context_dict)
return context_object
def set_context(context=None, font_scale=1, rc=None):
"""Set the plotting context parameters.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
which are version of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> set_context("paper")
>>> set_context("talk", font_scale=1.4)
>>> set_context("talk", rc={"lines.linewidth": 2})
See Also
--------
plotting_context : return a dictionary of rc parameters, or use in
a ``with`` statement to temporarily set the context.
set_style : set the default parameters for figure style
set_palette : set the default color palette for figures
"""
context_object = plotting_context(context, font_scale, rc)
mpl.rcParams.update(context_object)
class _RCAesthetics(dict):
def __enter__(self):
rc = mpl.rcParams
self._orig = {k: rc[k] for k in self._keys}
self._set(self)
def __exit__(self, exc_type, exc_value, exc_tb):
self._set(self._orig)
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
class _AxesStyle(_RCAesthetics):
"""Light wrapper on a dict to set style temporarily."""
_keys = _style_keys
_set = staticmethod(set_style)
class _PlottingContext(_RCAesthetics):
"""Light wrapper on a dict to set context temporarily."""
_keys = _context_keys
_set = staticmethod(set_context)
| gpl-2.0 |
KarlClinckspoor/SAXS_treatment | ESRF data treatment/Subtract_water.py | 1 | 3831 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 17:10:46 2017
@author: Karl Jan Clinckspoor
[email protected] [email protected]
Made at iNANO at Aarhus University
In a collaboration project with the University of Campinas.
Last modified: 01/09/2017
"""
import glob
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys
#%%
def subtract_water():
experiments = []
all_experiments = []
experiment_lengths = []
count = 1
while True:
expnumber = input("What is the experimental number of run number %d?\n(quit to end program, enter nothing to continue): " % (count))
if expnumber == '':
break
if expnumber.lower() == 'quit':
sys.exit()
if not expnumber.isdecimal():
print('invalid experiment number: ', expnumber)
continue
expnumber = expnumber.zfill(5)
if expnumber in experiments:
print('You already selected this experiment!')
continue
files = glob.glob('sc1470*%s*.dat'%expnumber)
for file in files:
if file.find('_ave') != -1:
files.remove(file)
all_experiments.append(files)
length = len(files)
experiment_lengths.append(length)
print('Found',length,'files for that experiment, not counting files ending with _ave.')
if length == 0:
print ('Oops. No experiment found. Do you want to select another? If not, the program will quit.')
do_select = input ('Y/n: ')
if do_select == 'Y':
continue
elif do_select != 'Y':
sys.exit()
experiments.append(expnumber)
count += 1
#%%
pdas_allfiles = []
for experiment in all_experiments:
pdas_allfiles.append(pd.read_table(experiment,names=['q','int','err'],dtype=np.float64, header=0))
#%%
water_file_number = input('What is the file number for water?')
water_file_name = glob.glob('sc1470*%s*'%water_file_number)
if water_file_name == []:
print('No file found')
print ('Found file(s): ', water_file_name)
if len(water_file_name) > 1:
selectedfile = input('More than one file was found. Which one to select? (number, beginning from 0)')
try:
water_file_pd = pd.read_table(water_file_name[int(selectedfile)],names=['q','int','err'],dtype=np.float64, header=0)
except:
print('Could this file file. Opening the first one')
water_file_pd = pd.read_table(water_file_name[0],names=['q','int','err'],dtype=np.float64, header=0)
elif len(water_file_name) == 1:
water_file_pd = pd.read_table(water_file_name[0],names=['q','int','err'],dtype=np.float64, header=0)
print('Successfully subtracted')
#%%
print('Subtracting')
#filename = input('What will the destination filename be?')
want_to_plot = input('Do you want to show a plot of all averaged curves? (Y/n) ')
for file,name in zip(pdas_allfiles, all_experiments):
file['int'] = file['int'] - water_file_pd['int']
file['err'] = (file['err']**2+water_file_pd['err']**2)**(1/2)
file.to_csv( (name+'_minus_water.csv'),sep='\t',index=False)
if want_to_plot == 'Y':
plt.errorbar(file['q'],file['int'], yerr=file['err'])
if want_to_plot == 'Y':
plt.xscale('log')
plt.yscale('log')
plt.show()
print ('This script is to subtract water from SAXS scattering curves')
if __name__ == '__main__':
while True:
do_want = input('Do you want to subtract water from SAXS scattering curves? (y)/n\n')
if do_want == 'n':
break
else:
subtract_water()
print('-'*10+'Done. Bye!'+'-'*10)
| gpl-3.0 |
carrillo/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly maching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete be not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete be not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but un-necessary splits harms completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float(upperlimited by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(ie perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally in-complete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is an normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mustual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally in-complete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
miloharper/neural-network-animation | matplotlib/sphinxext/ipython_console_highlighting.py | 11 | 4601 | """reST directive for syntax-highlighting ipython interactive sessions.
XXX - See what improvements can be made based on the new (as of Sept 2009)
'pycon' lexer for the python console. At the very least it will give better
highlighted tracebacks.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
#-----------------------------------------------------------------------------
# Needed modules
# Standard library
import re
# Third party
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
PythonTracebackLexer)
from pygments.token import Comment, Generic
from sphinx import highlighting
import matplotlib
matplotlib.cbook.warn_deprecated("1.4", """
The Sphinx extension ipython_console_highlighting has moved from
matplotlib to IPython, and its use in matplotlib is deprecated.
Change your import from 'matplotlib.sphinxext.ipython_directive' to
'IPython.sphinxext.ipython_directive.""")
#-----------------------------------------------------------------------------
# Global constants
line_re = re.compile('.*?\n')
#-----------------------------------------------------------------------------
# Code begins - classes and functions
class IPythonConsoleLexer(Lexer):
"""
For IPython console output or doctests, such as:
.. sourcecode:: ipython
In [1]: a = 'foo'
In [2]: a
Out[2]: 'foo'
In [3]: print a
foo
In [4]: 1 / 0
Notes:
- Tracebacks are not currently supported.
- It assumes the default IPython prompts, not customized ones.
"""
name = 'IPython console session'
aliases = ['ipython']
mimetypes = ['text/x-ipython-console']
input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
continue_prompt = re.compile(" \.\.\.+:")
tb_start = re.compile("\-+")
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
input_prompt = self.input_prompt.match(line)
continue_prompt = self.continue_prompt.match(line.rstrip())
output_prompt = self.output_prompt.match(line)
if line.startswith("#"):
insertions.append((len(curcode),
[(0, Comment, line)]))
elif input_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, input_prompt.group())]))
curcode += line[input_prompt.end():]
elif continue_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, continue_prompt.group())]))
curcode += line[continue_prompt.end():]
elif output_prompt is not None:
# Use the 'error' token for output. We should probably make
# our own token, but error is typicaly in a bright color like
# red, so it works fine for our output prompts.
insertions.append((len(curcode),
[(0, Generic.Error, output_prompt.group())]))
curcode += line[output_prompt.end():]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
def setup(app):
"""Setup as a sphinx extension."""
# This is only a lexer, so adding it below to pygments appears sufficient.
# But if somebody knows that the right API usage should be to do that via
# sphinx, by all means fix it here. At least having this setup.py
# suppresses the sphinx warning we'd get without it.
pass
#-----------------------------------------------------------------------------
# Register the extension as a valid pygments lexer
highlighting.lexers['ipython'] = IPythonConsoleLexer()
| mit |
liberatorqjw/scikit-learn | sklearn/semi_supervised/label_propagation.py | 15 | 15050 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supprots RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported..
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported..
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propgation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
rainwoodman/pypm | examples/nbody.py | 3 | 10288 | from mpi4py import MPI
import numpy
from argparse import ArgumentParser
from nbodykit.cosmology import Planck15
from nbodykit.cosmology import EHPower
from nbodykit.cosmology.perturbation import PerturbationGrowth
from scipy.integrate import quad
PowerSpectrum = EHPower(Planck15, redshift=0.0)
pt = PerturbationGrowth(Planck15.clone(Tcmb0=0))
class FastPM:
def K(ai, af, ar):
return 1 / (ar ** 2 * pt.E(ar)) * (pt.Gf(af) - pt.Gf(ai)) / pt.gf(ar)
def D(ai, af, ar):
return 1 / (ar ** 3 * pt.E(ar)) * (pt.Gp(af) - pt.Gp(ai)) / pt.gp(ar)
class FastPM1:
def K(ai, af, ar):
def func(a):
return 1.0 / (a * a * pt.E(a))
return quad(func, ai, af)[0]
def D(ai, af, ar):
return 1 / (ar ** 3 * pt.E(ar)) * (pt.Gp(af) - pt.Gp(ai)) / pt.gp(ar)
class FastPM2:
def K(ai, af, ar):
return 1 / (ar ** 2 * pt.E(ar)) * (pt.Gf(af) - pt.Gf(ai)) / pt.gf(ar)
def D(ai, af, ar):
def func(a):
return 1.0 / (a * a * a * pt.E(a))
return quad(func, ai, af)[0]
class Quinn:
def K(ai, af, ar):
def func(a):
return 1.0 / (a * a * pt.E(a))
return quad(func, ai, af)[0]
def D(ai, af, ar):
def func(a):
return 1.0 / (a * a * a * pt.E(a))
return quad(func, ai, af)[0]
class TVE:
""" split H = T + (E + V); drift has no explicit time dependency """
def K(ai, af, ar):
def func(a):
return 1.0 / (a * a * pt.E(a))
return quad(func, ai, af)[0]
def D(ai, af, ar):
def func(a):
return 1.0 / (a * pt.E(a))
return ar ** -2 * quad(func, ai, af)[0]
class VTE:
""" split H = (T + E) + V; kick has no explicit time dependency """
def K(ai, af, ar):
def func(a):
return 1.0 / (a * pt.E(a))
return ar ** -1 * quad(func, ai, af)[0]
def D(ai, af, ar):
def func(a):
return 1.0 / (a * a * a * pt.E(a))
return quad(func, ai, af)[0]
class Naive:
def K(ai, af, ar):
def func(a):
return 1.0 / (a * a * pt.E(a))
return func(ar) * (af - ai)
def D(ai, af, ar):
def func(a):
return 1.0 / (a * a * a * pt.E(a))
return func(ar) * (af - ai)
class State:
def __init__(self, Q, S, V):
self.Q = Q
self.S = S
self.V = V
def symp2(pm, state, time_steps, factors):
K = factors.K
D = factors.D
Q = state.Q
V = state.V
S = state.S
F = force(pm, Q, S)
E = 0
for ai, af in zip(time_steps[:-1], time_steps[1:]):
ac = (ai * af) ** 0.5
V[...] += F * K(ai, ac, ai)
S[...] += V * D(ai, af, ac)
F[...] = force(pm, Q, S)
V[...] += F * K(ac, af, af)
print(af)
#E = energy(pm, Q, S, V, af)
#print('E = ', E, af)
def symp3(pm, state, time_steps, factors):
K = factors.K
D = factors.D
Q = state.Q
V = state.V
S = state.S
F = force(pm, Q, S)
for ai, af in zip(time_steps[:-1], time_steps[1:]):
Dloga = numpy.log(af) - numpy.log(ai)
ac1 = af
ac2 = ac1 * numpy.exp(- 2.0 / 3.0 * Dloga)
ac3 = af
ad1 = ai * numpy.exp(- 1. / 24 * Dloga)
ad2 = ad1 * numpy.exp(3. / 4 * Dloga)
ad3 = af
S[...] += V * D(ai, ac1, ai)
F[...] = force(pm, Q, S)
V[...] += F * K(ai, ad1, af)
S[...] += V * D(af, ac2, ad1)
F[...] = force(pm, Q, S)
V[...] += F * K(ad1, ad2, ac2)
S[...] += V * D(ac2, ac3, ad2)
F[...] = force(pm, Q, S)
V[...] += F * K(ad2, ad3, ac3)
print(af)
#E = energy(pm, Q, S, V, af)
#print('E = ', E, af)
def symp1(pm, state, time_steps, factors):
K = factors.K
D = factors.D
Q = state.Q
V = state.V
S = state.S
F = force(pm, Q, S)
for ai, af in zip(time_steps[:-1], time_steps[1:]):
F = force(pm, Q, S)
V[...] += F * K(ai, af, ai)
S[...] += V * D(ai, af, af)
F[...] = force(pm, Q, S)
#E = energy(pm, Q, S, V, af)
#print('E = ', E, af)
print(af)
def dx1_transfer(direction):
def filter(k, v):
k2 = sum(ki ** 2 for ki in k)
k2[k2 == 0] = 1.0
kfinite = k[direction]
return 1j * kfinite / k2 * v
return filter
def force_transfer(direction):
def filter(k, v):
k2 = sum(ki ** 2 for ki in k)
k2[k2 == 0] = 1.0
C = (v.BoxSize / v.Nmesh)[direction]
w = k[direction] * C
kfinite = 1.0 / C * 1 / 6.0 * (8 * numpy.sin (w) - numpy.sin (2 * w));
return 1j * kfinite / k2 * v
return filter
def pot_transfer(k, v):
k2 = sum(ki ** 2 for ki in k)
k2[k2 == 0] = 1.0
return -1. / k2 * v
def lowpass_transfer(r):
def filter(k, v):
k2 = sum(ki ** 2 for ki in k)
return numpy.exp(-0.5 * k2 * r**2) * v
return filter
from pmesh.pm import ParticleMesh
def main(ns):
comm = MPI.COMM_WORLD
result = simulate(comm, ns)
pm = ParticleMesh(BoxSize=ns.BoxSize, Nmesh=[ns.Nmesh, ns.Nmesh, ns.Nmesh], dtype='f8', comm=comm)
report = analyze(pm, result)
if comm.rank == 0:
write_report(ns.output, report)
class Result(object): pass
def force(pm, Q, S):
rho1 = pm.create('real')
X = S + Q
layout = pm.decompose(X, smoothing=1.0 * pm.resampler.support)
rho1.paint(X, layout=layout, hold=False)
N = pm.comm.allreduce(len(X))
fac = 1.0 * pm.Nmesh.prod() / N
rho1[...] *= fac
rhok1 = rho1.r2c()
rhok = rhok1
#rhok.apply(CompensateTSCAliasing, kind='circular', out=Ellipsis)
#print(fac, rhok.cgetitem([0, 0, 0]), rhok.cgetitem([1, 1, 1]))
F = numpy.empty_like(Q)
for d in range(pm.ndim):
F[..., d] = rhok.apply(force_transfer(d)) \
.c2r().readout(X, layout=layout)
return 1.5 * pt.Om0 * F
def energy(pm, Q, S, V, a):
rho1 = pm.create('real')
X = S + Q
layout = pm.decompose(X, smoothing=1.0 * pm.resampler.support)
rho1.paint(X, layout=layout, hold=False)
N = pm.comm.allreduce(len(X))
fac = 1.0 * pm.Nmesh.prod() / N
rho1[...] *= fac
rhok1 = rho1.r2c()
phi = rhok1.apply(pot_transfer) \
.apply(lowpass_transfer(pm.BoxSize[0] / pm.Nmesh[0] * 4)) \
.c2r().readout(X, layout=layout)
U = 1.5 * pt.Om0 * pm.comm.allreduce(phi.sum() / a)
T = 0
for d in range(pm.ndim):
rho1.paint(Q, mass=V[:, d], hold=False)
V1 = rho1.r2c() \
.apply(lowpass_transfer(pm.BoxSize[0] / pm.Nmesh[0] * 4)) \
.c2r().readout(Q)
T = T + pm.comm.allreduce((V1 ** 2).sum() / (2 * a**2))
return T + U
def simulate(comm, ns):
pm = ParticleMesh(BoxSize=ns.BoxSize, Nmesh=[ns.Nmesh, ns.Nmesh, ns.Nmesh], dtype='f8', comm=comm)
gaussian = pm.generate_whitenoise(ns.seed, unitary=True)
time_steps = numpy.linspace(ns.ainit, ns.afinal, ns.steps, endpoint=True)
Q = pm.generate_uniform_particle_grid(shift=0)
print(Q.min(axis=0), Q.max(axis=0))
def convolve(k, v):
kmag = sum(ki**2 for ki in k) ** 0.5
ampl = (PowerSpectrum(kmag) / v.BoxSize.prod()) ** 0.5
return v * ampl
dlinear = gaussian.apply(convolve)
DX1 = numpy.zeros_like(Q)
layout = pm.decompose(Q)
# Fill it in one dimension at a time.
for d in range(pm.ndim):
DX1[..., d] = dlinear \
.apply(dx1_transfer(d)) \
.c2r().readout(Q, layout=layout)
a0 = time_steps[0]
# 1-LPT Displacement and Veloicty; scaled back from z=0 to the first time step.
S = DX1 * pt.D1(a=a0)
V = S * a0 ** 2 * pt.f1(a0) * pt.E(a0)
state = State(Q, S, V)
fpm = ParticleMesh(BoxSize=pm.BoxSize, Nmesh=pm.Nmesh * ns.boost, resampler='tsc', dtype='f8')
ns.scheme(fpm, state, time_steps, ns.factors)
r = Result()
r.Q = Q
r.DX1 = DX1
r.S = S
r.V = V
r.dlinear = dlinear
return r
def analyze(pm, r):
from nbodykit.algorithms.fftpower import FFTPower
from nbodykit.source import ArrayCatalog
from nbodykit.source import MemoryMesh
DataPM = numpy.empty(len(r.Q), dtype=[('Position', ('f8', 3))])
DataPM['Position'][:] = r.Q + r.S
Data1LPT = DataPM.copy()
Data1LPT['Position'][:] = r.Q + r.DX1
DataPM = ArrayCatalog(DataPM, BoxSize=pm.BoxSize, Nmesh=pm.Nmesh)
Data1LPT = ArrayCatalog(Data1LPT, BoxSize=pm.BoxSize, Nmesh=pm.Nmesh)
DataLinear = MemoryMesh(r.dlinear)
r = Result()
r.Ppm = FFTPower(DataPM, mode='1d')
r.P1lpt = FFTPower(Data1LPT, mode='1d')
r.Pl = FFTPower(DataLinear, mode='1d')
return r
def write_report(reportname, r):
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
fig = Figure(figsize=(6, 6))
ax = fig.add_subplot(111)
ax.plot(r.Ppm.power['k'], r.Ppm.power['power'] / r.Pl.power['power'] - 1, label='Multistep')
ax.plot(r.P1lpt.power['k'], r.P1lpt.power['power'] / r.Pl.power['power'] - 1, label='1-LPT')
ax.set_xscale('log')
ax.axhline(0.0, color='k', ls='--')
ax.set_ylim(-0.03, 0.03)
ax.set_xlim(0.003, 0.04)
ax.grid()
ax.set_xlabel('k [h/Mpc]')
ax.set_ylabel(r'P(k) / <P_l(k)>')
ax.set_title(r"Comparing Linear theory and 1-LPT")
ax.legend()
# numpy.savez(reportname.replace('.png', '.npz', r.__dict__)
canvas = FigureCanvasAgg(fig)
fig.savefig(reportname)
if __name__ == '__main__':
ap = ArgumentParser()
ap.add_argument("--Nmesh", type=int, default=64)
ap.add_argument("--BoxSize", type=float, default=200.)
ap.add_argument("--steps", type=int, default=5)
ap.add_argument("--ainit", type=float, default=0.1)
ap.add_argument("--afinal", type=float, default=1.0)
ap.add_argument("--seed", type=int, default=120577)
ap.add_argument("--boost", type=int, default=2)
ap.add_argument("--scheme", choices=[symp2, symp1, symp3], default=symp2, type=lambda n: globals()[n])
ap.add_argument("--factors", choices=[FastPM, FastPM1, FastPM2, Quinn, VTE, TVE, Naive], default=FastPM,
type=lambda n : globals()[n])
ap.add_argument("output", type=str)
ns = ap.parse_args()
main(ns)
| gpl-3.0 |
mzwiessele/applygpy | applygpy/tests/test_modelselection.py | 1 | 3575 | '''
Created on 30 Sep 2015
@author: Max Zwiessele
'''
import unittest, numpy as np, pandas as pd # @UnresolvedImport
import GPy, GPy.kern as kern
from applygpy.model_selection import cross_validate
from GPy.models.sparse_gp_regression import SparseGPRegression
from GPy.models.sparse_gp_classification import SparseGPClassification
from GPy.core.gp import GP
from GPy.likelihoods.gaussian import Gaussian
from GPy.inference.latent_function_inference.exact_gaussian_inference import ExactGaussianInference
class Test(unittest.TestCase):
def setUp(self):
np.random.seed(11111)
self.X = np.linspace(-1, 1, 20)[:,None]
k = GPy.kern.Matern32(1, lengthscale=1, variance=1)
self.sim_model = 'Mat+Lin'
self.mf = GPy.mappings.Linear(1, 1)
self.mf[:] = .01
self.mu = self.mf.f(self.X)
self.Y = np.random.multivariate_normal(np.zeros(self.X.shape[0]), k.K(self.X))[:,None]
self.mf.randomize()
self.test_models = [
['Mat+Lin', kern.Matern32(self.X.shape[1]) + kern.Linear(self.X.shape[1], variances=.01) + kern.Bias(self.X.shape[1])],
['Lin', kern.Linear(self.X.shape[1], variances=.01) + kern.Bias(self.X.shape[1])],
]
self.verbose = True
def testCrossval(self):
def model_builder(X, Y, kernel):
return GP(X, Y, kernel=kernel, likelihood=Gaussian(), mean_function=self.mf.copy(), inference_method=ExactGaussianInference())
res = cross_validate(self.X, self.Y+self.mu, verbose=self.verbose)#, kernels_models=self.test_models)#, model_builder=model_builder)
tmp = (res['error'] / res['test_size'])
self.assertEqual(tmp.loc['RMSE'].mean().argmin(), self.sim_model)
self.assertEqual(tmp.loc['log likelihood multivariate'].mean().argmax(), self.sim_model)
def testCrossvalSparse(self):
def model_builder(X, Y, kernel):
m = SparseGPRegression(X, Y, kernel=kernel)
m.Z.fix()
return m
import scipy.sparse as sparse
res = cross_validate(sparse.csr_matrix(self.X), self.Y, sparse=True, verbose=self.verbose,
kernels_models=self.test_models,
k=2,
#model_builder=model_builder
)
tmp = (res['error'] / res['test_size'])
self.assertEqual(tmp.loc['RMSE'].mean().argmin(), self.sim_model)
self.assertEqual(tmp.loc['log likelihood multivariate'].mean().argmax(), self.sim_model)
def testCrossvalClass(self):
res = cross_validate(self.X, self.Y>self.Y.mean(), verbose=self.verbose,
kernels_models=self.test_models,
#, model_builder=model_builder
k=2,
)
tmp = (res['error'] / res['test_size'])
self.assertEqual(tmp.loc['RMSE'].mean().argmin(), self.sim_model)
def testCrossvalSparseClass(self):
res = cross_validate(self.X, self.Y>self.Y.mean(), sparse=True, verbose=self.verbose,
kernels_models=self.test_models,
#model_builder=model_builder,
k=2,
)
tmp = (res['error'] / res['test_size'])
self.assertEqual(tmp.loc['RMSE'].mean().argmin(), self.sim_model)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testCrossval']
unittest.main() | bsd-3-clause |
pazeshun/jsk_apc | demos/instance_occlsegm/instance_occlsegm_lib/contrib/synthetic2d/extensions/instance_segmentation_voc_evaluator.py | 2 | 2288 | import copy
import chainer
from chainer import reporter
from chainercv.utils import apply_to_iterator
import pandas
import six
import tqdm
from ..evaluations import eval_instseg_voc
class InstanceSegmentationVOCEvaluator(chainer.training.extensions.Evaluator):
name = 'validation'
def __init__(self, iterator, target, device=None,
use_07_metric=False, label_names=None, show_progress=False):
super(InstanceSegmentationVOCEvaluator, self).__init__(
iterator=iterator, target=target, device=device)
self.use_07_metric = use_07_metric
self.label_names = label_names
self._show_progress = show_progress
def evaluate(self):
target = self._targets['main']
iterators = six.itervalues(self._iterators)
total = len(self._iterators)
if self._show_progress:
iterators = tqdm.tqdm(iterators, total=total, leave=False)
reports = []
for iterator in iterators:
report = self._evaluate_one(target, iterator)
reports.append(report)
report = pandas.DataFrame(reports).mean(skipna=True).to_dict()
observation = dict()
with reporter.report_scope(observation):
reporter.report(report, target)
return observation
def _evaluate_one(self, target, iterator):
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
if self._show_progress:
it = tqdm.tqdm(it, total=len(it.dataset), leave=False)
in_values, out_values, rest_values = apply_to_iterator(
target.predict, it)
imgs, = in_values
pred_bboxes, pred_masks, pred_labels, pred_scores = out_values
if len(rest_values) == 4:
gt_bboxes, gt_labels, gt_masks, gt_difficults = rest_values
elif len(rest_values) == 3:
gt_bboxes, gt_labels, gt_masks = rest_values
gt_difficults = None
else:
raise ValueError
# evaluate
result = eval_instseg_voc(
pred_masks, pred_labels, pred_scores,
gt_masks, gt_labels, gt_difficults,
use_07_metric=self.use_07_metric)
return result
| bsd-3-clause |
pyNLO/PyNLO | config.py | 2 | 6254 | # -*- coding: utf-8 -*-
import sys, os
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
templates_path = ['/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx', 'templates', '_templates', '.templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'PyNLO'
copyright = u''
version = 'latest'
release = 'latest'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
htmlhelp_basename = 'pynlo'
file_insertion_enabled = False
latex_documents = [
('index', 'pynlo.tex', u'PyNLO Documentation',
u'', 'manual'),
]
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
if sys.version[0] == '3': # Python 3
from unittest.mock import MagicMock
elif sys.version[0] == '2': # Python 2
from mock import Mock as MagicMock
else:
raise ImportError("Don't know how to import MagicMock.")
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pyfftw', 'scipy', 'numpy', 'matplotlib', 'matplotlib.pyplot']
print "Mocking ", MOCK_MODULES
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
###########################################################################
# auto-created readthedocs.org specific configuration #
###########################################################################
#
# The following code was added during an automated build on readthedocs.org
# It is auto created and injected for every build. The result is based on the
# conf.py.tmpl file found in the readthedocs.org codebase:
# https://github.com/rtfd/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl
#
import sys
import os.path
from six import string_types
from sphinx import version_info
from recommonmark.parser import CommonMarkParser
# Only Sphinx 1.3+
if version_info[0] == 1 and version_info[1] > 2:
# Markdown Support
if 'source_suffix' in globals():
if isinstance(source_suffix, string_types) and source_suffix != '.md':
source_suffix = [source_suffix, '.md']
elif '.md' not in source_suffix:
source_suffix.append('.md')
else:
source_suffix = ['.rst', '.md']
if 'source_parsers' in globals():
if '.md' not in source_parsers:
source_parsers['.md'] = CommonMarkParser
else:
source_parsers = {
'.md': CommonMarkParser,
}
if globals().get('source_suffix', False):
if isinstance(source_suffix, string_types):
SUFFIX = source_suffix
else:
SUFFIX = source_suffix[0]
else:
SUFFIX = '.rst'
#Add RTD Template Path.
if 'templates_path' in globals():
templates_path.insert(0, '/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx')
else:
templates_path = ['/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx', 'templates', '_templates',
'.templates']
# Add RTD Static Path. Add to the end because it overwrites previous files.
if not 'html_static_path' in globals():
html_static_path = []
if os.path.exists('_static'):
html_static_path.append('_static')
html_static_path.append('/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx/_static')
# Add RTD Theme only if they aren't overriding it already
using_rtd_theme = False
if 'html_theme' in globals():
if html_theme in ['default']:
# Allow people to bail with a hack of having an html_style
if not 'html_style' in globals():
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_style = None
html_theme_options = {}
if 'html_theme_path' in globals():
html_theme_path.append(sphinx_rtd_theme.get_html_theme_path())
else:
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
using_rtd_theme = True
else:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_style = None
html_theme_options = {}
if 'html_theme_path' in globals():
html_theme_path.append(sphinx_rtd_theme.get_html_theme_path())
else:
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
using_rtd_theme = True
# Force theme on setting
if globals().get('RTD_NEW_THEME', False):
html_theme = 'sphinx_rtd_theme'
html_style = None
html_theme_options = {}
using_rtd_theme = True
if globals().get('RTD_OLD_THEME', False):
html_style = 'rtd.css'
html_theme = 'default'
if globals().get('websupport2_base_url', False):
websupport2_base_url = 'https://readthedocs.org//websupport'
if 'http' not in settings.MEDIA_URL:
websupport2_static_url = 'https://media.readthedocs.org/static/'
else:
websupport2_static_url = 'https://media.readthedocs.org//static'
#Add project information to the template context.
context = {
'using_theme': using_rtd_theme,
'html_theme': html_theme,
'current_version': "latest",
'MEDIA_URL': "https://media.readthedocs.org/",
'PRODUCTION_DOMAIN': "readthedocs.org",
'versions': [
("latest", "/en/latest/"),
],
'downloads': [
],
'slug': 'pynlo',
'name': u'PyNLO',
'rtd_language': u'en',
'canonical_url': 'http://pynlo.readthedocs.org/en/latest/',
'analytics_code': '',
'single_version': False,
'conf_py_path': '/./',
'api_host': 'https://readthedocs.org/',
'github_user': 'ycasg',
'github_repo': 'PyNLO',
'github_version': 'master',
'display_github': True,
'bitbucket_user': 'None',
'bitbucket_repo': 'None',
'bitbucket_version': 'master',
'display_bitbucket': False,
'READTHEDOCS': True,
'using_theme': (html_theme == "default"),
'new_theme': (html_theme == "sphinx_rtd_theme"),
'source_suffix': SUFFIX,
'user_analytics_code': '',
'global_analytics_code': 'UA-17997319-1',
'commit': '180d51c6',
}
if 'html_context' in globals():
html_context.update(context)
else:
html_context = context
# Add custom RTD extension
if 'extensions' in globals():
extensions.append("readthedocs_ext.readthedocs")
else:
extensions = ["readthedocs_ext.readthedocs"]
| gpl-3.0 |
alexmojaki/odo | odo/backends/tests/test_aws.py | 3 | 9423 | from __future__ import print_function
import pytest
import sys
pytestmark = pytest.mark.skipif(sys.platform == 'win32',
reason='Requires Mac or Linux')
sa = pytest.importorskip('sqlalchemy')
boto = pytest.importorskip('boto')
pytest.importorskip('psycopg2')
pytest.importorskip('redshift_sqlalchemy')
import os
import itertools
import json
from contextlib import contextmanager, closing
from odo import into, resource, S3, discover, CSV, drop, append, odo
from odo.backends.aws import get_s3_connection
from odo.utils import tmpfile
from odo.compatibility import urlopen
import pandas as pd
import pandas.util.testing as tm
import datashape
from datashape import string, float64, int64
from boto.exception import S3ResponseError, NoAuthHandlerFound
tips_uri = 's3://nyqpug/tips.csv'
df = pd.DataFrame({
'a': list('abc'),
'b': [1, 2, 3],
'c': [1.0, 2.0, 3.0]
})[['a', 'b', 'c']]
js = pd.io.json.loads(pd.io.json.dumps(df, orient='records'))
is_authorized = False
tried = False
with closing(urlopen('http://httpbin.org/ip')) as url:
public_ip = json.loads(url.read().decode())['origin']
cidrip = public_ip + '/32'
@pytest.fixture(scope='module')
def rs_auth():
# if we aren't authorized and we've tried to authorize then skip, prevents
# us from having to deal with timeouts
# TODO: this will fail if we want to use a testing cluster with a different
# security group than 'default'
global is_authorized, tried
if not is_authorized and not tried:
if not tried:
try:
conn = boto.connect_redshift()
except NoAuthHandlerFound as e:
pytest.skip('authorization to access redshift cluster failed '
'%s' % e)
try:
conn.authorize_cluster_security_group_ingress('default',
cidrip=cidrip)
except boto.redshift.exceptions.AuthorizationAlreadyExists:
is_authorized = True
except Exception as e:
pytest.skip('authorization to access redshift cluster failed '
'%s' % e)
else:
is_authorized = True
finally:
tried = True
else:
pytest.skip('authorization to access redshift cluster failed')
@pytest.fixture
def db(rs_auth):
key = os.environ.get('REDSHIFT_DB_URI', None)
if not key:
pytest.skip('Please define a non-empty environment variable called '
'REDSHIFT_DB_URI to test redshift <- S3')
else:
return key
@pytest.yield_fixture
def temp_tb(db):
t = '%s::%s' % (db, next(_tmps))
try:
yield t
finally:
drop(resource(t))
@pytest.yield_fixture
def tmpcsv():
with tmpfile('.csv') as fn:
with open(fn, mode='w') as f:
df.to_csv(f, index=False)
yield fn
@contextmanager
def s3_bucket(extension):
with conn():
b = 's3://%s/%s%s' % (test_bucket_name, next(_tmps), extension)
try:
yield b
finally:
drop(resource(b))
@contextmanager
def conn():
# requires that you have a config file or envars defined for credentials
# this code makes me hate exceptions
try:
conn = get_s3_connection()
except S3ResponseError:
pytest.skip('unable to connect to s3')
else:
try:
grants = conn.get_bucket(test_bucket_name).get_acl().acl.grants
except S3ResponseError:
pytest.skip('no permission to read on bucket %s' %
test_bucket_name)
else:
if not any(g.permission == 'FULL_CONTROL' or
g.permission == 'READ' for g in grants):
pytest.skip('no permission to read on bucket %s' %
test_bucket_name)
else:
yield conn
test_bucket_name = 'into-redshift-csvs'
_tmps = ('tmp%d' % i for i in itertools.count())
def test_s3_resource():
csv = resource(tips_uri)
assert isinstance(csv, S3(CSV))
def test_s3_discover():
csv = resource(tips_uri)
assert isinstance(discover(csv), datashape.DataShape)
def test_s3_to_local_csv():
with tmpfile('.csv') as fn:
csv = into(fn, tips_uri)
path = os.path.abspath(csv.path)
assert os.path.exists(path)
def test_csv_to_s3_append():
df = tm.makeMixedDataFrame()
with tmpfile('.csv') as fn:
with s3_bucket('.csv') as b:
s3 = resource(b)
df.to_csv(fn, index=False)
append(s3, CSV(fn))
result = into(pd.DataFrame, s3)
tm.assert_frame_equal(df, result)
def test_csv_to_s3_into():
df = tm.makeMixedDataFrame()
with tmpfile('.csv') as fn:
with s3_bucket('.csv') as b:
df.to_csv(fn, index=False)
s3 = into(b, CSV(fn))
result = into(pd.DataFrame, s3)
tm.assert_frame_equal(df, result)
def test_s3_to_redshift(temp_tb):
s3 = resource(tips_uri)
table = into(temp_tb, s3)
assert discover(table) == discover(s3)
assert into(set, table) == into(set, s3)
def test_redshift_getting_started(temp_tb):
dshape = datashape.dshape("""var * {
userid: int64,
username: ?string[8],
firstname: ?string[30],
lastname: ?string[30],
city: ?string[30],
state: ?string[2],
email: ?string[100],
phone: ?string[14],
likesports: ?bool,
liketheatre: ?bool,
likeconcerts: ?bool,
likejazz: ?bool,
likeclassical: ?bool,
likeopera: ?bool,
likerock: ?bool,
likevegas: ?bool,
likebroadway: ?bool,
likemusicals: ?bool,
}""")
csv = S3(CSV)('s3://awssampledb/tickit/allusers_pipe.txt')
table = into(temp_tb, csv, dshape=dshape)
# make sure we have a non empty table
assert table.count().scalar() == 49990
def test_redshift_dwdate(temp_tb):
dshape = datashape.dshape("""var * {
key: int64,
date: string[19],
day_of_week: string[10],
month: string[10],
year: int64,
year_month_num: int64,
year_month: string[8],
day_num_in_week: int64,
day_num_in_month: int64,
day_num_in_year: int64,
month_num_in_year: int64,
week_num_in_year: int64,
selling_season: string[13],
last_day_in_week_fl: string[1],
last_day_in_month_fl: string[1],
holiday_fl: string[1],
weekday_fl: string[1]
}""")
# we have to pass the separator here because the date column has a comma
# TODO: see if we can provide a better error message by querying
# stl_load_errors
assert odo(S3(CSV)('s3://awssampledb/ssbgz/dwdate'),
temp_tb,
delimiter='|',
compression='gzip',
dshape=dshape).count().scalar() == 2556
def test_frame_to_s3_to_frame():
with s3_bucket('.csv') as b:
s3_csv = into(b, df)
result = into(pd.DataFrame, s3_csv)
tm.assert_frame_equal(result, df)
def test_csv_to_redshift(tmpcsv, temp_tb):
assert into(set, into(temp_tb, tmpcsv)) == into(set, tmpcsv)
def test_frame_to_redshift(temp_tb):
tb = into(temp_tb, df)
assert into(set, tb) == into(set, df)
def test_textfile_to_s3():
text = 'A cow jumped over the moon'
with tmpfile('.txt') as fn:
with s3_bucket('.txt') as b:
with open(fn, mode='w') as f:
f.write(os.linesep.join(text.split()))
result = into(b, resource(fn))
assert discover(result) == datashape.dshape('var * string')
def test_jsonlines_to_s3():
with tmpfile('.json') as fn:
with open(fn, mode='w') as f:
for row in js:
f.write(pd.io.json.dumps(row))
f.write(os.linesep)
with s3_bucket('.json') as b:
result = into(b, resource(fn))
assert discover(result) == discover(js)
def test_s3_jsonlines_discover():
json_dshape = discover(resource('s3://nyqpug/tips.json'))
names = list(map(str, sorted(json_dshape.measure.names)))
assert names == ['day', 'sex', 'size', 'smoker', 'time', 'tip',
'total_bill']
types = [json_dshape.measure[name] for name in names]
assert types == [string, string, int64, string, string, float64, float64]
def test_s3_csv_discover():
result = discover(resource('s3://nyqpug/tips.csv'))
expected = datashape.dshape("""var * {
total_bill: ?float64,
tip: ?float64,
sex: ?string,
smoker: ?string,
day: ?string,
time: ?string,
size: int64
}""")
assert result == expected
def test_s3_gz_csv_discover():
result = discover(S3(CSV)('s3://nyqpug/tips.gz'))
expected = datashape.dshape("""var * {
total_bill: ?float64,
tip: ?float64,
sex: ?string,
smoker: ?string,
day: ?string,
time: ?string,
size: int64
}""")
assert result == expected
def test_s3_to_sqlite():
with tmpfile('.db') as fn:
tb = into('sqlite:///%s::tips' % fn, tips_uri,
dshape=discover(resource(tips_uri)))
lhs = into(list, tb)
assert lhs == into(list, tips_uri)
| bsd-3-clause |
saildata/data-science-from-scratch | code/introduction.py | 5 | 8183 | from __future__ import division
# at this stage in the book we haven't actually installed matplotlib,
# comment this out if you need to
from matplotlib import pyplot as plt
##########################
# #
# FINDING KEY CONNECTORS #
# #
##########################
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" },
{ "id": 10, "name": "Jen" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# first give each user an empty list
for user in users:
user["friends"] = []
# and then populate the lists with friendships
for i, j in friendships:
# this works because users[i] is the user whose id is i
users[i]["friends"].append(users[j]) # add i as a friend of j
users[j]["friends"].append(users[i]) # add j as a friend of i
def number_of_friends(user):
"""how many friends does _user_ have?"""
return len(user["friends"]) # length of friend_ids list
total_connections = sum(number_of_friends(user)
for user in users) # 24
num_users = len(users)
avg_connections = total_connections / num_users # 2.4
################################
# #
# DATA SCIENTISTS YOU MAY KNOW #
# #
################################
def friends_of_friend_ids_bad(user):
# "foaf" is short for "friend of a friend"
return [foaf["id"]
for friend in user["friends"] # for each of user's friends
for foaf in friend["friends"]] # get each of _their_ friends
from collections import Counter # not loaded by default
def not_the_same(user, other_user):
"""two users are not the same if they have different ids"""
return user["id"] != other_user["id"]
def not_friends(user, other_user):
"""other_user is not a friend if he's not in user["friends"];
that is, if he's not_the_same as all the people in user["friends"]"""
return all(not_the_same(friend, other_user)
for friend in user["friends"])
def friends_of_friend_ids(user):
return Counter(foaf["id"]
for friend in user["friends"] # for each of my friends
for foaf in friend["friends"] # count *their* friends
if not_the_same(user, foaf) # who aren't me
and not_friends(user, foaf)) # and aren't my friends
print friends_of_friend_ids(users[3]) # Counter({0: 2, 5: 1})
interests = [
(0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
(0, "Spark"), (0, "Storm"), (0, "Cassandra"),
(1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
(1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
(2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
(3, "statistics"), (3, "regression"), (3, "probability"),
(4, "machine learning"), (4, "regression"), (4, "decision trees"),
(4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
(5, "Haskell"), (5, "programming languages"), (6, "statistics"),
(6, "probability"), (6, "mathematics"), (6, "theory"),
(7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
(7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
(8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
(9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
def data_scientists_who_like(target_interest):
return [user_id
for user_id, user_interest in interests
if user_interest == target_interest]
from collections import defaultdict
# keys are interests, values are lists of user_ids with that interest
user_ids_by_interest = defaultdict(list)
for user_id, interest in interests:
user_ids_by_interest[interest].append(user_id)
# keys are user_ids, values are lists of interests for that user_id
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
interests_by_user_id[user_id].append(interest)
def most_common_interests_with(user_id):
return Counter(interested_user_id
for interest in interests_by_user["user_id"]
for interested_user_id in users_by_interest[interest]
if interested_user_id != user_id)
###########################
# #
# SALARIES AND EXPERIENCE #
# #
###########################
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
(48000, 0.7), (76000, 6),
(69000, 6.5), (76000, 7.5),
(60000, 2.5), (83000, 10),
(48000, 1.9), (63000, 4.2)]
def make_chart_salaries_by_tenure():
tenures = [tenure for salary, tenure in salaries_and_tenures]
salaries = [salary for salary, tenure in salaries_and_tenures]
plt.scatter(tenures, salaries)
plt.xlabel("Years Experience")
plt.ylabel("Salary")
plt.show()
# keys are years
# values are the salaries for each tenure
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
salary_by_tenure[tenure].append(salary)
average_salary_by_tenure = {
tenure : sum(salaries) / len(salaries)
for tenure, salaries in salary_by_tenure.items()
}
def tenure_bucket(tenure):
if tenure < 2: return "less than two"
elif tenure < 5: return "between two and five"
else: return "more than five"
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
bucket = tenure_bucket(tenure)
salary_by_tenure_bucket[bucket].append(salary)
average_salary_by_bucket = {
tenure_bucket : sum(salaries) / len(salaries)
for tenure_bucket, salaries in salary_by_tenure_bucket.iteritems()
}
#################
# #
# PAID_ACCOUNTS #
# #
#################
def predict_paid_or_unpaid(years_experience):
if years_experience < 3.0: return "paid"
elif years_experience < 8.5: return "unpaid"
else: return "paid"
######################
# #
# TOPICS OF INTEREST #
# #
######################
words_and_counts = Counter(word
for user, interest in interests
for word in interest.lower().split())
if __name__ == "__main__":
print
print "######################"
print "#"
print "# FINDING KEY CONNECTORS"
print "#"
print "######################"
print
print "total connections", total_connections
print "number of users", num_users
print "average connections", total_connections / num_users
print
# create a list (user_id, number_of_friends)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
print "users sorted by number of friends:"
print sorted(num_friends_by_id,
key=lambda (user_id, num_friends): num_friends, # by number of friends
reverse=True) # largest to smallest
print
print "######################"
print "#"
print "# DATA SCIENTISTS YOU MAY KNOW"
print "#"
print "######################"
print
print "friends of friends bad for user 0:", friends_of_friend_ids_bad(users[0])
print "friends of friends for user 3:", friends_of_friend_ids(users[3])
print
print "######################"
print "#"
print "# SALARIES AND TENURES"
print "#"
print "######################"
print
print "average salary by tenure", average_salary_by_tenure
print "average salary by tenure bucket", average_salary_by_bucket
print
print "######################"
print "#"
print "# MOST COMMON WORDS"
print "#"
print "######################"
print
for word, count in words_and_counts.most_common():
if count > 1:
print word, count
| unlicense |
anhaidgroup/py_entitymatching | py_entitymatching/matcher/linregmatcher.py | 1 | 3321 | """
This module contains functions for linear regression classifier.
"""
import logging
from array import array
from py_entitymatching.matcher.mlmatcher import MLMatcher
from py_entitymatching.matcher.matcherutils import get_ts
from sklearn.linear_model import LinearRegression
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import TransformerMixin
import numpy as np
logger = logging.getLogger(__name__)
class LinRegClassifierSKLearn(BaseEstimator, ClassifierMixin, TransformerMixin):
"""
This class implements Linear Regression classifer.
Specifically, this class uses Linear Regression matcher from
scikit-learn, wraps it up to form a classifier.
"""
def __init__(self, *args, **kwargs):
# Set the classifier to the scikit-learn Linear Regression matcher.
self.clf = LinearRegression(*args, **kwargs)
# Set the threshold to 0
self.threshold = 0.0
# Set the classes_
self.classes_ = np.array([0, 1], np.int64)
def fit(self, X, y):
# Convert 0 and 1s to -1, and 1s
y = (2 * y) - 1
# Call the fit method of Linear Regression matcher
self.clf.fit(X, y)
# Return the wrapper object
return self
def predict(self, X):
# Call the predict method from the underlying matcher
y = self.clf.predict(X)
# Convert back the predictions a number between -1 and 1 to -1 and -1
y = (2 * (y > self.threshold)) - 1
# Convert all the -1 to 0s
y[y == -1] = 0
# Return back the predictions
return y
def predict_proba(self, X):
# There is no proba function defined for Linear Regression Matcher in scikit
# learn. So we return the probs as 0 or 1
# give the warning to the user
logger.warning('There is no proba function defined for Linear Regression '
'Matcher in scikit learn. So we return the probs as 1')
y = self.predict(X)
p = np.ndarray(shape=[len(y), 2])
for i in range(len(y)):
if y[i] == 1:
p[i][0] = 0
p[i][1] = 1
elif y[i] == 0:
p[i][0] = 1
p[i][1] = 0
return p
def get_params(self, deep=True):
"""
Function to get params. This will be used by other scikit-learn
matchers.
"""
return self.clf.get_params(deep=deep)
class LinRegMatcher(MLMatcher):
"""
Linear regression matcher.
Args:
*args,**kwargs: Arguments to scikit-learn's Linear Regression matcher.
name (string): Name that should be given to this matcher.
"""
def __init__(self, *args, **kwargs):
super(LinRegMatcher, self).__init__()
# If the name is given, then pop it
name = kwargs.pop('name', None)
if name is None:
# If the name is not given, then create one.
# Currently, we use a constant string + a random number.
self.name = 'LinearRegression' + '_' + get_ts()
else:
# set the name for the matcher.
self.name = name
# Wrap the class implementing linear regression classifer.
self.clf = LinRegClassifierSKLearn(*args, **kwargs)
| bsd-3-clause |
mschmidt87/nest-simulator | pynest/examples/cross_check_mip_corrdet.py | 9 | 3565 | # -*- coding: utf-8 -*-
#
# cross_check_mip_corrdet.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
from matplotlib.pylab import *
'''
Auto- and crosscorrelation functions for spike trains.
A time bin of size tbin is centered around the time difference it
represents. If the correlation function is calculated for tau in
[-tau_max, tau_max], the pair events contributing to the left-most
bin are those for which tau in [-tau_max-tbin/2, tau_max+tbin/2) and
so on.
Correlate two spike trains with each other assumes spike times to be ordered in
time. tau > 0 means spike2 is later than spike1
tau_max: maximum time lag in ms correlation function
tbin: bin size
spike1: first spike train [tspike...]
spike2: second spike train [tspike...]
'''
def corr_spikes_sorted(spike1, spike2, tbin, tau_max, h):
tau_max_i = int(tau_max / h)
tbin_i = int(tbin / h)
cross = zeros(int(2 * tau_max_i / tbin_i + 1), 'd')
j0 = 0
for spki in spike1:
j = j0
while j < len(spike2) and spike2[j] - spki < -tau_max_i - tbin_i / 2.0:
j += 1
j0 = j
while j < len(spike2) and spike2[j] - spki < tau_max_i + tbin_i / 2.0:
cross[int(
(spike2[j] - spki + tau_max_i + 0.5 * tbin_i) / tbin_i)] += 1.0
j += 1
return cross
nest.ResetKernel()
h = 0.1 # Computation step size in ms
T = 100000.0 # Total duration
delta_tau = 10.0
tau_max = 100.0
pc = 0.5
nu = 100.0
# grng_seed is 0 because test data was produced for seed = 0
nest.SetKernelStatus({'local_num_threads': 1, 'resolution': h,
'overwrite_files': True, 'grng_seed': 0})
# Set up network, connect and simulate
mg = nest.Create('mip_generator')
nest.SetStatus(mg, {'rate': nu, 'p_copy': pc})
cd = nest.Create('correlation_detector')
nest.SetStatus(cd, {'tau_max': tau_max, 'delta_tau': delta_tau})
sd = nest.Create('spike_detector')
nest.SetStatus(sd, {'withtime': True,
'withgid': True, 'time_in_steps': True})
pn1 = nest.Create('parrot_neuron')
pn2 = nest.Create('parrot_neuron')
nest.Connect(mg, pn1)
nest.Connect(mg, pn2)
nest.Connect(pn1, sd)
nest.Connect(pn2, sd)
nest.SetDefaults('static_synapse', {'weight': 1.0, 'receptor_type': 0})
nest.Connect(pn1, cd)
nest.SetDefaults('static_synapse', {'weight': 1.0, 'receptor_type': 1})
nest.Connect(pn2, cd)
nest.Simulate(T)
n_events = nest.GetStatus(cd)[0]['n_events']
n1 = n_events[0]
n2 = n_events[1]
lmbd1 = (n1 / (T - tau_max)) * 1000.0
lmbd2 = (n2 / (T - tau_max)) * 1000.0
h = 0.1
tau_max = 100.0 # ms correlation window
t_bin = 10.0 # ms bin size
spikes = nest.GetStatus(sd)[0]['events']['senders']
sp1 = find(spikes[:] == 4)
sp2 = find(spikes[:] == 5)
# Find crosscorrolation
cross = corr_spikes_sorted(sp1, sp2, t_bin, tau_max, h)
print("Crosscorrelation:")
print(cross)
print("Sum of crosscorrelation:")
print(sum(cross))
| gpl-2.0 |
harshaneelhg/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
tony810430/flink | flink-python/pyflink/fn_execution/coder_impl_slow.py | 6 | 25554 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
from abc import ABC, abstractmethod
from typing import List
import cloudpickle
import pyarrow as pa
from pyflink.common import Row, RowKind
from pyflink.datastream.window import TimeWindow, CountWindow
from pyflink.fn_execution.ResettableIO import ResettableIO
from pyflink.fn_execution.flink_fn_execution_pb2 import CoderParam
from pyflink.fn_execution.stream_slow import InputStream, OutputStream
from pyflink.table.utils import pandas_to_arrow, arrow_to_pandas
ROW_KIND_BIT_SIZE = 2
class LengthPrefixBaseCoderImpl(ABC):
"""
LengthPrefixBaseCoder will be used in Operations and other coders will be the field coder of
LengthPrefixBaseCoder.
"""
def __init__(self, field_coder: 'FieldCoderImpl'):
self._field_coder = field_coder
self._data_out_stream = OutputStream()
def decode_from_stream(self, in_stream: InputStream):
while in_stream.size() > 0:
yield self._field_coder.decode_from_stream(in_stream, in_stream.read_var_int64())
def _write_data_to_output_stream(self, out_stream: OutputStream):
out_stream.write_var_int64(self._data_out_stream.size())
out_stream.write(self._data_out_stream.get())
self._data_out_stream.clear()
class FieldCoderImpl(ABC):
@abstractmethod
def encode_to_stream(self, value, out_stream: OutputStream):
"""
Encodes `value` to the output stream.
:param value: The output data
:param out_stream: Output Stream
"""
pass
@abstractmethod
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
"""
Decodes data from the input stream.
:param in_stream: Input Stream
:param length: The `length` size data of input stream will be decoded. The default value is
0 which means the coder won't take use of the length to decode the data from input stream.
:return: The decoded Data.
"""
pass
def encode(self, value):
out = OutputStream()
self.encode_to_stream(value, out)
return out.get()
def decode(self, encoded):
return self.decode_from_stream(InputStream(encoded), len(encoded))
class IterableCoderImpl(LengthPrefixBaseCoderImpl):
"""
Encodes iterable data to output stream. The output mode will decide whether write a special end
message 0x00 to output stream after encoding data.
"""
def __init__(self, field_coder: 'FieldCoderImpl', output_mode):
super(IterableCoderImpl, self).__init__(field_coder)
self._output_mode = output_mode
def encode_to_stream(self, value: List, out_stream):
for item in value:
self._field_coder.encode_to_stream(item, self._data_out_stream)
self._write_data_to_output_stream(out_stream)
# write end message
if self._output_mode == CoderParam.MULTIPLE_WITH_END:
out_stream.write_var_int64(1)
out_stream.write_byte(0x00)
class ValueCoderImpl(LengthPrefixBaseCoderImpl):
"""
Encodes a single data to output stream.
"""
def __init__(self, field_coder: 'FieldCoderImpl'):
super(ValueCoderImpl, self).__init__(field_coder)
def encode_to_stream(self, value, out_stream):
self._field_coder.encode_to_stream(value, self._data_out_stream)
self._write_data_to_output_stream(out_stream)
class MaskUtils:
"""
A util class used to encode mask value.
"""
def __init__(self, field_count):
self._field_count = field_count
# the row kind uses the first 2 bits of the bitmap, the remaining bits are used for null
# mask, for more details refer to:
# https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/RowSerializer.java
self._leading_complete_bytes_num = (self._field_count + ROW_KIND_BIT_SIZE) // 8
self._remaining_bits_num = (self._field_count + ROW_KIND_BIT_SIZE) % 8
self.null_mask_search_table = self.generate_null_mask_search_table()
self.null_byte_search_table = (0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01)
self.row_kind_search_table = [0x00, 0x80, 0x40, 0xC0]
@staticmethod
def generate_null_mask_search_table():
"""
Each bit of one byte represents if the column at the corresponding position is None or not,
e.g. 0x84 represents the first column and the sixth column are None.
"""
null_mask = []
for b in range(256):
every_num_null_mask = [(b & 0x80) > 0, (b & 0x40) > 0, (b & 0x20) > 0, (b & 0x10) > 0,
(b & 0x08) > 0, (b & 0x04) > 0, (b & 0x02) > 0, (b & 0x01) > 0]
null_mask.append(tuple(every_num_null_mask))
return tuple(null_mask)
def write_mask(self, value, row_kind_value, out_stream):
field_pos = 0
null_byte_search_table = self.null_byte_search_table
remaining_bits_num = self._remaining_bits_num
# first byte contains the row kind bits
b = self.row_kind_search_table[row_kind_value]
for i in range(0, 8 - ROW_KIND_BIT_SIZE):
if field_pos + i < len(value) and value[field_pos + i] is None:
b |= null_byte_search_table[i + ROW_KIND_BIT_SIZE]
field_pos += 8 - ROW_KIND_BIT_SIZE
out_stream.write_byte(b)
for _ in range(1, self._leading_complete_bytes_num):
b = 0x00
for i in range(0, 8):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
field_pos += 8
out_stream.write_byte(b)
if self._leading_complete_bytes_num >= 1 and remaining_bits_num:
b = 0x00
for i in range(remaining_bits_num):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
out_stream.write_byte(b)
def read_mask(self, in_stream):
mask = []
mask_search_table = self.null_mask_search_table
remaining_bits_num = self._remaining_bits_num
for _ in range(self._leading_complete_bytes_num):
b = in_stream.read_byte()
mask.extend(mask_search_table[b])
if remaining_bits_num:
b = in_stream.read_byte()
mask.extend(mask_search_table[b][0:remaining_bits_num])
return mask
class FlattenRowCoderImpl(FieldCoderImpl):
"""
A coder for flatten row (List) object (without field names and row kind value is 0).
"""
def __init__(self, field_coders: List[FieldCoderImpl]):
self._field_coders = field_coders
self._field_count = len(field_coders)
self._mask_utils = MaskUtils(self._field_count)
def encode_to_stream(self, value, out_stream):
# encode mask value
self._mask_utils.write_mask(value, 0, out_stream)
# encode every field value
for i in range(self._field_count):
item = value[i]
if item is not None:
self._field_coders[i].encode_to_stream(item, out_stream)
def decode_from_stream(self, in_stream, length: int = 0):
row_kind_and_null_mask = self._mask_utils.read_mask(in_stream)
return [None if row_kind_and_null_mask[idx + ROW_KIND_BIT_SIZE] else
self._field_coders[idx].decode_from_stream(in_stream)
for idx in range(0, self._field_count)]
def __repr__(self):
return 'FlattenRowCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class RowCoderImpl(FieldCoderImpl):
"""
A coder for `Row` object.
"""
def __init__(self, field_coders, field_names):
self._field_coders = field_coders
self._field_count = len(field_coders)
self._field_names = field_names
self._mask_utils = MaskUtils(self._field_count)
def encode_to_stream(self, value: Row, out_stream):
# encode mask value
self._mask_utils.write_mask(value._values, value.get_row_kind().value, out_stream)
# encode every field value
for i in range(self._field_count):
item = value[i]
if item is not None:
self._field_coders[i].encode_to_stream(item, out_stream)
def decode_from_stream(self, in_stream, length=0) -> Row:
row_kind_and_null_mask = self._mask_utils.read_mask(in_stream)
fields = [None if row_kind_and_null_mask[idx + ROW_KIND_BIT_SIZE] else
self._field_coders[idx].decode_from_stream(in_stream)
for idx in range(0, self._field_count)]
# compute the row_kind value
row_kind_value = 0
for i in range(ROW_KIND_BIT_SIZE):
row_kind_value += int(row_kind_and_null_mask[i]) * 2 ** i
row = Row(*fields)
row.set_field_names(self._field_names)
row.set_row_kind(RowKind(row_kind_value))
return row
def __repr__(self):
return 'RowCoderImpl[%s, %s]' % \
(', '.join(str(c) for c in self._field_coders), self._field_names)
class ArrowCoderImpl(FieldCoderImpl):
"""
A coder for arrow format data.
"""
def __init__(self, schema, row_type, timezone):
self._schema = schema
self._field_types = row_type.field_types()
self._timezone = timezone
self._resettable_io = ResettableIO()
self._batch_reader = ArrowCoderImpl._load_from_stream(self._resettable_io)
def encode_to_stream(self, cols, out_stream):
self._resettable_io.set_output_stream(out_stream)
batch_writer = pa.RecordBatchStreamWriter(self._resettable_io, self._schema)
batch_writer.write_batch(
pandas_to_arrow(self._schema, self._timezone, self._field_types, cols))
def decode_from_stream(self, in_stream, length=0):
return self.decode_one_batch_from_stream(in_stream, length)
@staticmethod
def _load_from_stream(stream):
while stream.readable():
reader = pa.ipc.open_stream(stream)
yield reader.read_next_batch()
def decode_one_batch_from_stream(self, in_stream: InputStream, size: int) -> List:
self._resettable_io.set_input_bytes(in_stream.read(size))
# there is only one arrow batch in the underlying input stream
return arrow_to_pandas(self._timezone, self._field_types, [next(self._batch_reader)])
def __repr__(self):
return 'ArrowCoderImpl[%s]' % self._schema
class OverWindowArrowCoderImpl(FieldCoderImpl):
"""
A coder for over window with arrow format data.
The data structure: [window data][arrow format data].
"""
def __init__(self, arrow_coder_impl: ArrowCoderImpl):
self._arrow_coder = arrow_coder_impl
self._int_coder = IntCoderImpl()
def encode_to_stream(self, cols, out_stream):
self._arrow_coder.encode_to_stream(cols, out_stream)
def decode_from_stream(self, in_stream, length=0):
window_num = self._int_coder.decode_from_stream(in_stream)
length -= 4
window_boundaries_and_arrow_data = []
for _ in range(window_num):
window_size = self._int_coder.decode_from_stream(in_stream)
length -= 4
window_boundaries_and_arrow_data.append(
[self._int_coder.decode_from_stream(in_stream)
for _ in range(window_size)])
length -= 4 * window_size
window_boundaries_and_arrow_data.append(
self._arrow_coder.decode_one_batch_from_stream(in_stream, length))
return window_boundaries_and_arrow_data
def __repr__(self):
return 'OverWindowArrowCoderImpl[%s]' % self._arrow_coder
class TinyIntCoderImpl(FieldCoderImpl):
"""
A coder for tiny int value (from -128 to 127).
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_int8(value)
def decode_from_stream(self, in_stream, length=0):
return in_stream.read_int8()
class SmallIntCoderImpl(FieldCoderImpl):
"""
A coder for small int value (from -32,768 to 32,767).
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_int16(value)
def decode_from_stream(self, in_stream, length=0):
return in_stream.read_int16()
class IntCoderImpl(FieldCoderImpl):
"""
A coder for int value (from -2,147,483,648 to 2,147,483,647).
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_int32(value)
def decode_from_stream(self, in_stream, length=0):
return in_stream.read_int32()
class BigIntCoderImpl(FieldCoderImpl):
"""
A coder for big int value (from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807).
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_int64(value)
def decode_from_stream(self, in_stream, length=0):
return in_stream.read_int64()
class BooleanCoderImpl(FieldCoderImpl):
"""
A coder for a boolean value.
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_byte(value)
def decode_from_stream(self, in_stream, length=0):
return not not in_stream.read_byte()
class FloatCoderImpl(FieldCoderImpl):
"""
A coder for a float value (4-byte single precision floating point number).
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_float(value)
def decode_from_stream(self, in_stream, length=0):
return in_stream.read_float()
class DoubleCoderImpl(FieldCoderImpl):
"""
A coder for a double value (8-byte double precision floating point number).
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_double(value)
def decode_from_stream(self, in_stream, length=0):
return in_stream.read_double()
class BinaryCoderImpl(FieldCoderImpl):
"""
A coder for a bytes value.
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_bytes(value, len(value))
def decode_from_stream(self, in_stream, length=0):
return in_stream.read_bytes()
class CharCoderImpl(FieldCoderImpl):
"""
A coder for a str value.
"""
def encode_to_stream(self, value, out_stream):
bytes_value = value.encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
def decode_from_stream(self, in_stream, length=0):
return in_stream.read_bytes().decode("utf-8")
class DecimalCoderImpl(FieldCoderImpl):
"""
A coder for a decimal value (with fixed precision and scale).
"""
def __init__(self, precision, scale):
self.context = decimal.Context(prec=precision)
self.scale_format = decimal.Decimal(10) ** -scale
def encode_to_stream(self, value, out_stream):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
value = value.quantize(self.scale_format)
bytes_value = str(value).encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
decimal.setcontext(user_context)
def decode_from_stream(self, in_stream, length=0):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
value = decimal.Decimal(in_stream.read_bytes().decode("utf-8")).quantize(self.scale_format)
decimal.setcontext(user_context)
return value
class BigDecimalCoderImpl(FieldCoderImpl):
"""
A coder for a big decimal value (without fixed precision and scale).
"""
def encode_to_stream(self, value, out_stream):
bytes_value = str(value).encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
def decode_from_stream(self, in_stream, length=0):
return decimal.Decimal(in_stream.read_bytes().decode("utf-8"))
class DateCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.date value.
"""
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def encode_to_stream(self, value, out_stream):
out_stream.write_int32(self.date_to_internal(value))
def decode_from_stream(self, in_stream, length=0):
value = in_stream.read_int32()
return self.internal_to_date(value)
def date_to_internal(self, d):
return d.toordinal() - self.EPOCH_ORDINAL
def internal_to_date(self, v):
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimeCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.time value.
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_int32(self.time_to_internal(value))
def decode_from_stream(self, in_stream, length=0):
value = in_stream.read_int32()
return self.internal_to_time(value)
@staticmethod
def time_to_internal(t):
milliseconds = (t.hour * 3600000
+ t.minute * 60000
+ t.second * 1000
+ t.microsecond // 1000)
return milliseconds
@staticmethod
def internal_to_time(v):
seconds, milliseconds = divmod(v, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, milliseconds * 1000)
class TimestampCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.datetime value.
"""
def __init__(self, precision):
self.precision = precision
def is_compact(self):
return self.precision <= 3
def encode_to_stream(self, value, out_stream):
milliseconds, nanoseconds = self.timestamp_to_internal(value)
if self.is_compact():
assert nanoseconds == 0
out_stream.write_int64(milliseconds)
else:
out_stream.write_int64(milliseconds)
out_stream.write_int32(nanoseconds)
def decode_from_stream(self, in_stream, length=0):
if self.is_compact():
milliseconds = in_stream.read_int64()
nanoseconds = 0
else:
milliseconds = in_stream.read_int64()
nanoseconds = in_stream.read_int32()
return self.internal_to_timestamp(milliseconds, nanoseconds)
@staticmethod
def timestamp_to_internal(timestamp):
seconds = int(timestamp.replace(tzinfo=datetime.timezone.utc).timestamp())
microseconds_of_second = timestamp.microsecond
milliseconds = seconds * 1000 + microseconds_of_second // 1000
nanoseconds = microseconds_of_second % 1000 * 1000
return milliseconds, nanoseconds
def internal_to_timestamp(self, milliseconds, nanoseconds):
second, microsecond = (milliseconds // 1000,
milliseconds % 1000 * 1000 + nanoseconds // 1000)
return datetime.datetime.utcfromtimestamp(second).replace(microsecond=microsecond)
class LocalZonedTimestampCoderImpl(TimestampCoderImpl):
"""
A coder for a datetime.datetime with time zone value.
"""
def __init__(self, precision, timezone):
super(LocalZonedTimestampCoderImpl, self).__init__(precision)
self.timezone = timezone
def internal_to_timestamp(self, milliseconds, nanoseconds):
return self.timezone.localize(
super(LocalZonedTimestampCoderImpl, self).internal_to_timestamp(
milliseconds, nanoseconds))
class PickledBytesCoderImpl(FieldCoderImpl):
"""
A coder for all kinds of python object.
"""
def __init__(self):
self.field_coder = BinaryCoderImpl()
def encode_to_stream(self, value, out_stream):
coded_data = cloudpickle.dumps(value)
self.field_coder.encode_to_stream(coded_data, out_stream)
def decode_from_stream(self, in_stream, length=0):
return self._decode_one_value_from_stream(in_stream)
def _decode_one_value_from_stream(self, in_stream: InputStream):
real_data = self.field_coder.decode_from_stream(in_stream)
value = cloudpickle.loads(real_data)
return value
def __repr__(self) -> str:
return 'PickledBytesCoderImpl[%s]' % str(self.field_coder)
class TupleCoderImpl(FieldCoderImpl):
"""
A coder for a tuple value.
"""
def __init__(self, field_coders):
self._field_coders = field_coders
self._field_count = len(field_coders)
def encode_to_stream(self, value, out_stream):
field_coders = self._field_coders
for i in range(self._field_count):
field_coders[i].encode_to_stream(value[i], out_stream)
def decode_from_stream(self, stream, length=0):
decoded_list = [field_coder.decode_from_stream(stream)
for field_coder in self._field_coders]
return (*decoded_list,)
def __repr__(self) -> str:
return 'TupleCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class GenericArrayCoderImpl(FieldCoderImpl):
"""
A coder for object array value (the element of array could be any kind of Python object).
"""
def __init__(self, elem_coder: FieldCoderImpl):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream):
out_stream.write_int32(len(value))
for elem in value:
if elem is None:
out_stream.write_byte(False)
else:
out_stream.write_byte(True)
self._elem_coder.encode_to_stream(elem, out_stream)
def decode_from_stream(self, in_stream, length=0):
size = in_stream.read_int32()
elements = [self._elem_coder.decode_from_stream(in_stream)
if in_stream.read_byte() else None for _ in range(size)]
return elements
def __repr__(self):
return 'GenericArrayCoderImpl[%s]' % repr(self._elem_coder)
class PrimitiveArrayCoderImpl(FieldCoderImpl):
"""
A coder for primitive array value (the element of array won't be null).
"""
def __init__(self, elem_coder: FieldCoderImpl):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream):
out_stream.write_int32(len(value))
for elem in value:
self._elem_coder.encode_to_stream(elem, out_stream)
def decode_from_stream(self, in_stream, length=0):
size = in_stream.read_int32()
elements = [self._elem_coder.decode_from_stream(in_stream) for _ in range(size)]
return elements
def __repr__(self):
return 'PrimitiveArrayCoderImpl[%s]' % repr(self._elem_coder)
class MapCoderImpl(FieldCoderImpl):
"""
A coder for map value (dict with same type key and same type value).
"""
def __init__(self, key_coder: FieldCoderImpl, value_coder: FieldCoderImpl):
self._key_coder = key_coder
self._value_coder = value_coder
def encode_to_stream(self, map_value, out_stream):
out_stream.write_int32(len(map_value))
for key in map_value:
self._key_coder.encode_to_stream(key, out_stream)
value = map_value[key]
if value is None:
out_stream.write_byte(True)
else:
out_stream.write_byte(False)
self._value_coder.encode_to_stream(map_value[key], out_stream)
def decode_from_stream(self, in_stream, length=0):
size = in_stream.read_int32()
map_value = {}
for _ in range(size):
key = self._key_coder.decode_from_stream(in_stream)
is_null = in_stream.read_byte()
if is_null:
map_value[key] = None
else:
value = self._value_coder.decode_from_stream(in_stream)
map_value[key] = value
return map_value
def __repr__(self):
return 'MapCoderImpl[%s]' % ' : '.join([repr(self._key_coder), repr(self._value_coder)])
class TimeWindowCoderImpl(FieldCoderImpl):
"""
A coder for TimeWindow.
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_int64(value.start)
out_stream.write_int64(value.end)
def decode_from_stream(self, in_stream, length=0):
start = in_stream.read_int64()
end = in_stream.read_int64()
return TimeWindow(start, end)
class CountWindowCoderImpl(FieldCoderImpl):
"""
A coder for CountWindow.
"""
def encode_to_stream(self, value, out_stream):
out_stream.write_int64(value.id)
def decode_from_stream(self, in_stream, length=0):
return CountWindow(in_stream.read_int64())
| apache-2.0 |
shahankhatch/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE' % n_components_pca_mle, linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
Winand/pandas | pandas/core/indexes/category.py | 3 | 27066 | import numpy as np
from pandas._libs import index as libindex
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.common import (
is_categorical_dtype,
_ensure_platform_int,
is_list_like,
is_interval_dtype,
is_scalar)
from pandas.core.common import (_asarray_tuplesafe,
_values_from_object)
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.algorithms import take_1d
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core import accessor
import pandas.core.base as base
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
class CategoricalIndex(Index, accessor.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
Parameters
----------
data : array-like or Categorical, (1-dimensional)
categories : optional, array-like
categories for the CategoricalIndex
ordered : boolean,
designating if the categories are ordered
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
See Also
--------
Categorical, Index
"""
_typ = 'categoricalindex'
_engine_type = libindex.Int64Engine
_attributes = ['name']
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name=name, dtype=dtype)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, ABCCategorical):
data = cls._create_categorical(cls, data, categories, ordered,
dtype)
elif isinstance(data, CategoricalIndex):
data = data._data
data = cls._create_categorical(cls, data, categories, ordered,
dtype)
else:
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
cls._scalar_data_error(data)
data = []
data = cls._create_categorical(cls, data, categories, ordered)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, categories=None, ordered=None,
name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
from pandas.core.categorical import Categorical
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
if name is None:
name = self.name
cat = Categorical.from_codes(codes, categories=categories,
ordered=self.ordered)
return CategoricalIndex(cat, name=name)
@staticmethod
def _create_categorical(self, data, categories=None, ordered=None,
dtype=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
dtype : CategoricalDtype, defaults to existing
Returns
-------
Categorical
"""
if (isinstance(data, (ABCSeries, type(self))) and
is_categorical_dtype(data)):
data = data.values
if not isinstance(data, ABCCategorical):
if ordered is None and dtype is None:
ordered = False
from pandas.core.categorical import Categorical
data = Categorical(data, categories=categories, ordered=ordered,
dtype=dtype)
else:
from pandas.core.dtypes.dtypes import CategoricalDtype
if categories is not None:
data = data.set_categories(categories, ordered=ordered)
elif ordered is not None and ordered != data.ordered:
data = data.set_ordered(ordered)
if isinstance(dtype, CategoricalDtype):
# we want to silently ignore dtype='category'
data = data._set_dtype(dtype)
return data
@classmethod
def _simple_new(cls, values, name=None, categories=None, ordered=None,
dtype=None, **kwargs):
result = object.__new__(cls)
values = cls._create_categorical(cls, values, categories, ordered,
dtype=dtype)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, categories=None, ordered=None,
dtype=None, **kwargs):
# categories and ordered can't be part of attributes,
# as these are properties
# we want to reuse self.dtype if possible, i.e. neither are
# overridden.
if dtype is not None and (categories is not None or
ordered is not None):
raise TypeError("Cannot specify both `dtype` and `categories` "
"or `ordered`")
if categories is None and ordered is None:
dtype = self.dtype if dtype is None else dtype
return super(CategoricalIndex, self)._shallow_copy(
values=values, dtype=dtype, **kwargs)
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
return super(CategoricalIndex, self)._shallow_copy(
values=values, categories=categories,
ordered=ordered, **kwargs)
def _is_dtype_compat(self, other):
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError("categories must match existing categories "
"when appending")
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(
self, other, categories=self.categories, ordered=self.ordered))
if not other.isin(values).all():
raise TypeError("cannot append a non-category item to a "
"CategoricalIndex")
return other
def equals(self, other):
"""
Determines if two CategorialIndex objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
return array_equivalent(self._data, other)
except (TypeError, ValueError):
pass
return False
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
attrs = [
('categories',
ibase.default_pprint(self.categories,
max_seq_items=max_categories)),
('ordered', self.ordered)]
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype.name))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
@property
def inferred_type(self):
return 'categorical'
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
def get_values(self):
""" return the underlying data as an ndarray """
return self._data.get_values()
def tolist(self):
return self._data.tolist()
@property
def codes(self):
return self._data.codes
@property
def categories(self):
return self._data.categories
@property
def ordered(self):
return self._data.ordered
def _reverse_indexer(self):
return self._data._reverse_indexer()
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
if self.categories._defer_to_indexing:
return key in self.categories
return key in self.values
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
if self.categories._defer_to_indexing:
return self.categories.contains(key)
return key in self.values
def __array__(self, dtype=None):
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex.from_intervals(np.array(self))
return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves
return self._engine_type(lambda: self.codes.astype('i8'), len(self))
# introspection
@cache_readonly
def is_unique(self):
return not self.duplicated().any()
@property
def is_monotonic_increasing(self):
return Index(self.codes).is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
return Index(self.codes).is_monotonic_decreasing
@Appender(base._shared_docs['unique'] % _index_doc_kwargs)
def unique(self):
result = base.IndexOpsMixin.unique(self)
# CategoricalIndex._shallow_copy uses keeps original categories
# and ordered if not otherwise specified
return self._shallow_copy(result, categories=result.categories,
ordered=result.ordered)
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas._libs.hashtable import duplicated_int64
codes = self.codes.astype('i8')
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype('object')
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
---------
>>> unique_index = pd.CategoricalIndex(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.CategoricalIndex(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = p.dCategoricalIndex(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
codes = self.categories.get_loc(key)
if (codes == -1):
raise KeyError(key)
return self._engine.get_loc(codes)
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
k = _values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
indexer = self.get_loc(k)
return series.iloc[indexer]
except (KeyError, TypeError):
pass
# we might be a positional inexer
return super(CategoricalIndex, self).get_value(series, key)
def _can_reindex(self, indexer):
""" always allow reindexing """
pass
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
from pandas.core.categorical import Categorical
cat = Categorical(values,
categories=self.categories,
ordered=self.ordered)
return self._shallow_copy(cat, **self._get_attributes_dict())
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError("argument method is not implemented for "
"CategoricalIndex.reindex")
if level is not None:
raise NotImplementedError("argument level is not implemented for "
"CategoricalIndex.reindex")
if limit is not None:
raise NotImplementedError("argument limit is not implemented for "
"CategoricalIndex.reindex")
target = ibase._ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes):
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(
np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an inital Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ibase._ensure_index(target)
if self.is_unique and self.equals(target):
return np.arange(len(self), dtype='intp')
if method == 'pad' or method == 'backfill':
raise NotImplementedError("method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex")
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for CategoricalIndex')
if (isinstance(target, CategoricalIndex) and
self.values.is_dtype_equal(target)):
# we have the same codes
codes = target.codes
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase._ensure_index(target)
if isinstance(target, CategoricalIndex):
target = target.categories
codes = self.categories.get_indexer(target)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return _ensure_platform_int(indexer), missing
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if self.categories._defer_to_indexing:
return self.categories._convert_scalar_indexer(key, kind=kind)
return super(CategoricalIndex, self)._convert_scalar_indexer(
key, kind=kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only "
"include values that are "
"in the categories")
return self.get_indexer(keyarr)
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = _asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.codes, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return self._create_from_codes(taken)
def is_dtype_equal(self, other):
return self._data.is_dtype_equal(other)
take_nd = take
def map(self, mapper):
"""Apply mapper function to its categories (not codes).
Parameters
----------
mapper : callable
Function to be applied. When all categories are mapped
to different categories, the result will be a CategoricalIndex
which has the same order property as the original. Otherwise,
the result will be a Index.
Returns
-------
applied : CategoricalIndex or Index
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
ValueError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1):
raise TypeError("cannot insert an item into a CategoricalIndex "
"that is not already an existing category")
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
def _concat(self, to_concat, name):
# if calling index is category, don't check dtype of others
return CategoricalIndex._concat_same_dtype(self, to_concat, name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
ValueError if other is not in the categories
"""
to_concat = [self._is_dtype_compat(c) for c in to_concat]
codes = np.concatenate([c.codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# if name is None, _create_from_codes sets self.name
result.name = name
return result
def _codes_for_groupby(self, sort):
""" Return a Categorical adjusted for groupby """
return self.values._codes_for_groupby(sort)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
def _evaluate_compare(self, other):
# if we have a Categorical type, then must have the same
# categories
if isinstance(other, CategoricalIndex):
other = other._values
elif isinstance(other, Index):
other = self._create_categorical(
self, other._values, categories=self.categories,
ordered=self.ordered)
if isinstance(other, (ABCCategorical, np.ndarray,
ABCSeries)):
if len(self.values) != len(other):
raise ValueError("Lengths must match to compare")
if isinstance(other, ABCCategorical):
if not self.values.is_dtype_equal(other):
raise TypeError("categorical index comparisions must "
"have the same categories and ordered "
"attributes")
return getattr(self.values, op)(other)
return _evaluate_compare
cls.__eq__ = _make_compare('__eq__')
cls.__ne__ = _make_compare('__ne__')
cls.__lt__ = _make_compare('__lt__')
cls.__gt__ = _make_compare('__gt__')
cls.__le__ = _make_compare('__le__')
cls.__ge__ = _make_compare('__ge__')
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if 'inplace' in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
@classmethod
def _add_accessors(cls):
""" add in Categorical accessor methods """
from pandas.core.categorical import Categorical
CategoricalIndex._add_delegate_accessors(
delegate=Categorical, accessors=["rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered", "as_unordered",
"min", "max"],
typ='method', overwrite=True)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
CategoricalIndex._add_accessors()
| bsd-3-clause |
luo66/scikit-learn | sklearn/linear_model/passive_aggressive.py | 97 | 10879 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=False
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
edermartioli/fotometria | plot.py | 1 | 2535 | # -*- coding: iso-8859-1 -*-
"""
Created on Wed Jul 12, 2017
@author: Eder Martioli & Janderson Oliveira
Laboratorio Nacional de Astrofisica, Brazil
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
from optparse import OptionParser
from scipy.stats import binned_statistic
parser = OptionParser()
parser.add_option("-i", "--input", dest="input", help='Input light curve',type='string',default="")
parser.add_option("-v", action="store_true", dest="verbose", help="verbose",default=0)
try:
options,args = parser.parse_args(sys.argv[1:])
except:
print "Error: check usage with runPhotometry.py -h ";sys.exit(1);
if options.verbose:
print 'Input light curve: ', options.input
depth = 0.0183
t,x,y,f,ef,x1,y1,f1,ef1,x2,y2,f2,ef2,x3,y3,f3,ef3,x4,y4,f4,ef4,x5,y5,f5,ef5,x6,y6,f6,ef6,x7,y7,f7,ef7 = np.loadtxt(options.input,delimiter=' ', usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32), unpack=True)
t = t - t[0]
#df =f/(f1+f2+f3+f4+f5+f6+f7)
df =f/(f1+f2+f4+f5)
#dfref = f
dfref = y
df1 = f/f1
df2 = f/f2
df3 = f/f3
df4 = f/f4
df5 = f/f5
df6 = f/f6
df7 = f/f7
s = np.std(df)
s1 = np.std(df1)
s2 = np.std(df2)
s3 = np.std(df3)
s4 = np.std(df4)
s5 = np.std(df5)
s6 = np.std(df6)
s7 = np.std(df7)
print "----------------------------"
print "All stars - Precision:", s*100
print "----------------------------"
print "Star 1 - Precision:", s1*100
print "Star 2 - Precision:", s2*100
print "Star 3 - Precision:", s3*100
print "Star 4 - Precision:", s4*100
print "Star 5 - Precision:", s5*100
print "Star 6 - Precision:", s6*100
print "Star 7 - Precision:", s7*100
print "----------------------------"
snr = depth/s
snr1 = depth/s1
snr2 = depth/s2
snr3 = depth/s3
snr4 = depth/s4
snr5 = depth/s5
snr6 = depth/s6
snr7 = depth/s7
print "------------------------"
print "All stars - SNR:", snr
print "------------------------"
print "Star 1 - SNR:", snr1
print "Star 2 - SNR:", snr2
print "Star 3 - SNR:", snr3
print "Star 4 - SNR:", snr4
print "Star 5 - SNR:", snr5
print "Star 6 - SNR:", snr6
print "Star 7 - SNR:", snr7
print "------------------------"
dfn = df / np.median(df)
plt.plot(t,dfn,'.')
nbins = 70
statistic='median'
binnedTime = binned_statistic(t, t, statistic=statistic, bins=nbins)[0]
binnedFlux = binned_statistic(t, dfn, statistic=statistic, bins=nbins)[0]
plt.plot(binnedTime,binnedFlux,'o-', lw=2)
plt.ylabel('flux')
plt.xlabel('time (d)')
#dfrefn = dfref / np.median(dfref)
#plt.plot(t,dfrefn,'-.')
plt.show()
| gpl-3.0 |
roxyboy/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
sixy6e/geospatial-hdf5 | examples/append_reference_band_example.py | 1 | 3853 | #!/usr/bin/env python
import numpy
from scipy import ndimage
import pandas
from geoh5 import kea
from geoh5.kea import common as kc
# https://github.com/sixy6e/image-processing
from image_processing.segmentation import Segments
"""
Once completed open the file in tuiview to see the colourised segments
and the raster attribute table.
"""
def main():
"""
Create a segmented array.
Compute basic stats for each segment:
(min, max, mean, standard deviation, total, area)
Write the segmented image and the raster attribute table.
Add another raster band to the dataset as a linked/reference
dataset.
Compute basic stats for the same segments, but using different
input data, and save the new raster attribute table to the new
`reference/linked` band.
"""
# data dimensions
dims = (1000, 1000)
# create some random data and segment via value > 5000
seg_data = numpy.random.randint(0, 10001, dims).astype('uint32')
seg_data, nlabels = ndimage.label(seg_data > 5000)
# create some random data to calculate stats against
data = numpy.random.ranf(dims)
# create a segments class object
seg = Segments(seg_data, include_zero=True)
# retrieve basic stats (min, max, mean, standard deviation, total, area)
stats_table = seg.basic_statistics(data, dataframe=True)
stats_table.set_index("Segment_IDs", inplace=True)
# join via segment id, specifying 'outer' will account for empty segments
df = pandas.DataFrame({"Histogram": seg.histogram})
stats_table = df.join(stats_table, how='outer')
nrows = stats_table.shape[0]
# assign random colours to each segment
stats_table.insert(1, "Red", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(2, "Green", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(3, "Blue", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(4, "Alpha", 255)
# define 1 output band and add another band later
kwargs = {'width': dims[1],
'height': dims[0],
'count': 1,
'compression': 4,
'chunks': (100, 100),
'blocksize': 100,
'dtype': seg_data.dtype.name}
with kea.open('add-reference-band-example.kea', 'w', **kwargs) as src:
src.write(seg_data, 1)
# define the layer type as thematic (labelled, classified etc)
src.write_layer_type(1, kc.LayerType.thematic)
# write the stats table as an attribute table
usage = {"Red": "Red",
"Green": "Green",
"Blue": "Blue",
"Alpha": "Alpha",
"Histogram": "PixelCount"}
src.write_rat(stats_table, 1, usage=usage)
# add a new image band, but as a reference to band 1
src.add_image_band(band_name='Reference to band 1', link=1)
src.write_layer_type(2, kc.LayerType.thematic)
# create some random data to calculate stats against
data = numpy.random.ranf(dims)
# retrieve basic stats (min, max, mean, standard deviation, total, area)
stats_table2 = seg.basic_statistics(data, dataframe=True)
stats_table2.set_index("Segment_IDs", inplace=True)
# join via segment id, specifying 'outer' will account for empty segments
df = pandas.DataFrame({"Histogram": seg.histogram})
stats_table2 = df.join(stats_table2, how='outer')
# insert colors
stats_table2.insert(1, "Red", stats_table["Red"])
stats_table2.insert(2, "Green", stats_table["Green"])
stats_table2.insert(3, "Blue", stats_table["Blue"])
stats_table2.insert(4, "Alpha", 255)
# write the rat to the newly created band 2
src.write_rat(stats_table2, 2, usage=usage)
if __name__ == '__main__':
main()
| mit |
lin-credible/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is computational not costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly most
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularizaton parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
evidation-health/bokeh | examples/glyphs/anscombe.py | 39 | 2945 | from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle, Line
from bokeh.models import (
ColumnDataSource, Grid, GridPlot, LinearAxis, Plot, Range1d
)
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(
x_range=xdr, y_range=ydr,
title=title, plot_width=400, plot_height=400,
border_fill='white', background_fill='#e9e0db'
)
xaxis = LinearAxis(axis_line_color=None)
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_line_color=None)
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
line = Line(x='x', y='y', line_color="#666699", line_width=2)
plot.add_glyph(lines_source, line)
circle = Circle(
x=xname, y=yname, size=12,
fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5
)
plot.add_glyph(circles_source, circle)
return plot
#where will this comment show up
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = GridPlot(children=[[I, II], [III, IV]])
doc = Document()
doc.add(grid)
if __name__ == "__main__":
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
lyuboraykov/flight-genie | flight_genie/main.py | 1 | 5465 | from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
from flight_genie.flight import Flight
from flight_genie.utils import (
get_names_values_from_csv,
get_pairs_list_from_names_values,
get_relative_error,
get_relative_error_success_count,
get_median_of_list,
get_avg_of_list,
get_value_by_key_in_pairs_list,
print_comparable_flights
)
PRICE_USD = 'priceusd'
BIN_SIZE = 128
K_NEIGHBORS = 2
def get_flights_list_from_csv(data_csv,
flight_constructor):
"""Get a list of flights from csv file"""
names, values = get_names_values_from_csv(data_csv)
pairs_list = get_pairs_list_from_names_values(names, values)
flights = []
for p in pairs_list:
flight = flight_constructor(p)
is_flight_away = float(flight.get_attribute('daystodeparture')) >= 30
is_one_way_flight = flight.get_attribute('inbounddate').strip() == ''
if is_one_way_flight:
continue
flights.append(flight)
return flights
def get_KD_tree(flights_dataset):
"""Get the KD tree from the csv
Used mostly with training_csv
"""
neigh = NearestNeighbors(1,
algorithm='auto',
radius=1.0,
leaf_size=30,
p=1,
metric='cityblock')
neigh.fit(list(flights_dataset))
return neigh
def parse_csv(training_csv, testing_csv):
"""Return a tuple of lists - train flights, test flights"""
training_flights = get_flights_list_from_csv(training_csv,
Flight)
testing_flights = get_flights_list_from_csv(testing_csv,
Flight.get_from_core_data)
return training_flights, testing_flights
def predicted_and_real_flights_prices(training_flights, testing_flights):
"""Return generator over pairs of predicted and real prices for flights"""
training_flights_dataset = [f.to_numerical_list([PRICE_USD])
for f in training_flights]
neigh_tree = get_KD_tree(training_flights_dataset)
testing_flights_dataset = [f.to_numerical_list([PRICE_USD])
for f in testing_flights]
for i, flight in enumerate(testing_flights_dataset):
predicted_ids = neigh_tree.kneighbors([flight],
K_NEIGHBORS,
return_distance=False)[0]
predicted_prices = sorted([
training_flights[i].get_price_per_ticket()
for i in predicted_ids
])
# TODO play with K_NEIGHBORS and median vs avg
# predicted_price = get_median_of_list(predicted_prices)
predicted_price = get_avg_of_list(predicted_prices)
real_price = testing_flights[i].get_price_per_ticket()
if get_relative_error(predicted_price, real_price) > 5:
print_comparable_flights(training_flights[predicted_ids[0]],
testing_flights[i])
yield predicted_price, real_price
def generate_plots(training_csv, testing_csv):
"""Make all necessary plots from testing and training CSVs"""
training_flights, testing_flights = parse_csv(training_csv,
testing_csv)
relative_errors = []
prices_pair = predicted_and_real_flights_prices(training_flights,
testing_flights)
for predicted_price, real_price in prices_pair:
relative_error = get_relative_error(float(predicted_price),
float(real_price))
relative_errors.append(relative_error * 100)
plt.hist(relative_errors, bins=BIN_SIZE)
plt.ylabel('Count')
plt.xlabel('Relative error %')
plt.show()
def main(training_csv, testing_csv):
"""Print all predicted, real prices and relative errors"""
training_flights, testing_flights = parse_csv(training_csv,
testing_csv)
prices_pair = predicted_and_real_flights_prices(training_flights,
testing_flights)
relative_errors = []
for predicted_price, real_price in prices_pair:
relative_errors.append(get_relative_error(float(predicted_price),
float(real_price)))
percentage_of_all = 0
current_perc = 5
while percentage_of_all < 100:
success_count = get_relative_error_success_count(relative_errors,
current_perc / 100)
print('Flights predicted below {}% err - {}'.format(current_perc,
success_count),
end=' ')
percentage_of_all = (success_count / len(relative_errors)) * 100
print('This is {}% of all'.format(percentage_of_all))
current_perc += 5
def plot_data():
raise NotImplemented()
def linear_regression():
raise NotImplemented()
def random_forest():
raise NotImplemented()
def nearest_neighbour():
raise NotImplemented()
def time_series():
raise NotImplemented()
if __name__ == '__main__':
main('training_data_reworked_prices.csv', 'test_data_reworked_prices.csv')
| mit |
abimannans/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve start at 0 and ending and
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and aread under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for growing number of consecutive relevant
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that Label ranking average precision works for various
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for growing number of consecutive relevant label
for n_relevant in range(1, n_labels):
# Check for a bunch of position
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank correspond to 1. Rank higher than 1 are worse.
# The best inverse ranking correspond to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Rank need to be corrected to take into account ties
# ex: rank 1 ex aequo means that both label are rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Let's count the number of relevant label with better rank
# (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non trival case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non trival case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
matthewwardrop/formulaic | formulaic/materializers/transforms/encode_categorical.py | 1 | 2236 | import warnings
from collections import OrderedDict
import numpy
import pandas
import scipy.sparse as spsparse
from formulaic.errors import DataMismatchWarning
from formulaic.utils.sparse import categorical_encode_series_to_sparse_csc_matrix
from formulaic.utils.stateful_transforms import stateful_transform
@stateful_transform
def encode_categorical(data, reduced_rank=False, spans_intercept=True, output=None, _state=None, _spec=None):
# TODO: Add support for specifying contrast matrix / etc
output = output or _spec.output or 'pandas'
if output == 'sparse':
data = numpy.array(data)
data = data.reshape((data.size, ))
categories, encoded = categorical_encode_series_to_sparse_csc_matrix(data, reduced_rank=reduced_rank)
else:
data = pandas.Series(data).astype('category')
categories = list(data.cat.categories)
encoded = dict(pandas.get_dummies(data, drop_first=reduced_rank))
# Update state
if 'categories' in _state:
extra_categories = set(categories).difference(_state['categories'])
if extra_categories:
warnings.warn(f"Data has categories that were not seen in original dataset: {extra_categories}. This will likely skew the results of your analyses.", DataMismatchWarning)
for category in extra_categories:
del encoded[category]
missing_categories = set(_state['categories']).difference(categories)
if missing_categories:
for missing_category in missing_categories:
if output == 'sparse':
encoded[missing_category] = spsparse.csc_matrix((data.shape[0], 1))
else:
encoded[missing_category] = pandas.Series(numpy.zeros(data.shape[0]))
encoded = OrderedDict(sorted(encoded.items(), key=lambda x: x[0]))
else:
_state['categories'] = categories
encoded.update({
'__kind__': 'categorical',
'__spans_intercept__': spans_intercept and not reduced_rank,
'__drop_field__': _state['categories'][0] if spans_intercept and not reduced_rank else None,
'__format__': "{name}[T.{field}]",
'__encoded__': True,
})
return encoded
| mit |
PyClass/PyClassLessons | instructors/need-rework/6_socrata_matplotlib_workshop/date-demo.py | 3 | 1667 | #!/usr/bin/env python
"""
Show how to make date plots in matplotlib using date tick locators and
formatters. See major_minor_demo1.py for more information on
controlling major and minor ticks
All matplotlib date plotting is done by converting date instances into
days since the 0001-01-01 UTC. The conversion, tick locating and
formatting is done behind the scenes so this is most transparent to
you. The dates module provides several converter functions date2num
and num2date
"""
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
# load a numpy record array from yahoo csv data with fields date,
# open, close, volume, adj_close from the mpl-data/example directory.
# The record array stores python datetime.date as an object array in
# the date column
datafile = cbook.get_sample_data('goog.npy')
r = np.load(datafile).view(np.recarray)
fig, ax = plt.subplots()
ax.plot(r.date, r.adj_close)
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
datemin = datetime.date(r.date.min().year, 1, 1)
datemax = datetime.date(r.date.max().year+1, 1, 1)
ax.set_xlim(datemin, datemax)
# format the coords message box
def price(x): return '$%1.2f'%x
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = price
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
plt.show()
| mit |
rhyolight/nupic.research | projects/combined_sequences/generate_plots.py | 4 | 23002 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file plots the results obtained from combined_sequences.py.
"""
import cPickle
import matplotlib.pyplot as plt
from optparse import OptionParser
import os
import sys
from collections import defaultdict
import numpy
import matplotlib as mpl
import traceback
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
def plotOneInferenceRun(stats,
fields,
basename,
itemType="",
plotDir="plots",
ymax=100,
trialNumber=0):
"""
Plots individual inference runs.
"""
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
# plot request stats
for field in fields:
fieldKey = field[0] + " C0"
plt.plot(stats[fieldKey], marker='+', label=field[1])
# format
plt.legend(loc="upper right")
plt.xlabel("Input number")
plt.xticks(range(stats["numSteps"]))
plt.ylabel("Number of cells")
plt.ylim(-5, ymax)
plt.title("Activity while inferring {}".format(itemType))
# save
relPath = "{}_exp_{}.pdf".format(basename, trialNumber)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close()
def plotMultipleInferenceRun(stats,
fields,
basename,
plotDir="plots"):
"""
Plots individual inference runs.
"""
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
# plot request stats
for i, field in enumerate(fields):
fieldKey = field[0] + " C0"
trace = []
for s in stats:
trace += s[fieldKey]
plt.plot(trace, label=field[1], color=colorList[i])
# format
plt.legend(loc="upper right")
plt.xlabel("Input number")
plt.xticks(range(0, len(stats)*stats[0]["numSteps"]+1,5))
plt.ylabel("Number of cells")
plt.ylim(-5, 55)
plt.title("Inferring combined sensorimotor and temporal sequence stream")
# save
relPath = "{}_exp_combined.pdf".format(basename)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close()
def plotAccuracyDuringSensorimotorInference(resultsFig5B, title="", yaxis=""):
"""
Plot accuracy vs number of features
"""
# Read out results and get the ranges we want.
with open(resultsFig5B, "rb") as f:
results = cPickle.load(f)
objectRange = []
featureRange = []
for r in results:
if r["numObjects"] not in objectRange: objectRange.append(r["numObjects"])
if r["numFeatures"] not in featureRange: featureRange.append(r["numFeatures"])
objectRange.sort()
featureRange.sort()
print "objectRange=",objectRange
print "featureRange=",featureRange
########################################################################
#
# Accumulate the TM accuracies for each condition in a list and compute mean
# and stdeviations
# For L2 we average across all feature ranges
accuracies = defaultdict(list)
l2Accuracies = defaultdict(list)
for r in results:
accuracies[(r["numObjects"], r["numFeatures"])].append(r["objectCorrectSparsityTM"])
l2Accuracies[r["numObjects"]].append(r["objectAccuracyL2"])
# meanAccuracy[o,f] = accuracy of TM with o objects and f unique features.
meanAccuracy = numpy.zeros((max(objectRange)+1, max(featureRange) + 1))
stdev = numpy.zeros((max(objectRange)+1, max(featureRange) + 1))
meanL2Accuracy = numpy.zeros(max(objectRange)+1)
stdevL2 = numpy.zeros(max(objectRange)+1)
for o in objectRange:
for f in featureRange:
a = numpy.array(accuracies[(o, f)])
meanAccuracy[o, f] = 100.0*a.mean()
stdev[o, f] = 100.0*a.std()
# Accuracies for L2
a = numpy.array(l2Accuracies[o])
meanL2Accuracy[o] = 100.0*a.mean()
stdevL2[o] = 100.0*a.std()
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "accuracy_during_sensorimotor_inference.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
legendList.append('Sequence layer, feature pool size: {}'.format(f))
plt.errorbar(objectRange, meanAccuracy[objectRange, f],
yerr=stdev[objectRange, f],
color=colorList[i])
plt.errorbar(objectRange, meanL2Accuracy[objectRange],
yerr=stdevL2[objectRange],
color=colorList[len(featureRange)])
legendList.append('Sensorimotor layer')
# format
plt.legend(legendList, bbox_to_anchor=(0., 0.6, 1., .102), loc="right", prop={'size':10})
plt.xlabel("Number of objects")
plt.ylim(-10.0, 110.0)
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close()
def plotAccuracyDuringDecrementChange(results, title="", yaxis=""):
"""
Plot accuracy vs decrement value
"""
decrementRange = []
featureRange = []
for r in results:
if r["basalPredictedSegmentDecrement"] not in decrementRange:
decrementRange.append(r["basalPredictedSegmentDecrement"])
if r["numFeatures"] not in featureRange:
featureRange.append(r["numFeatures"])
decrementRange.sort()
featureRange.sort()
print decrementRange
print featureRange
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# accuracy[o,f] = accuracy with o objects in training
# and f unique features.
accuracy = numpy.zeros((len(featureRange), len(decrementRange)))
TMAccuracy = numpy.zeros((len(featureRange), len(decrementRange)))
totals = numpy.zeros((len(featureRange), len(decrementRange)))
for r in results:
dec = r["basalPredictedSegmentDecrement"]
nf = r["numFeatures"]
accuracy[featureRange.index(nf), decrementRange.index(dec)] += r["objectAccuracyL2"]
TMAccuracy[featureRange.index(nf), decrementRange.index(dec)] += r["sequenceCorrectClassificationsTM"]
totals[featureRange.index(nf), decrementRange.index(dec)] += 1
for i,f in enumerate(featureRange):
print i, f, accuracy[i] / totals[i]
print i, f, TMAccuracy[i] / totals[i]
print
# ########################################################################
# #
# # Create the plot.
# plt.figure()
# plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
# "plots", "accuracy_during_sensorimotor_inference.pdf")
#
# # Plot each curve
# legendList = []
# colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
#
# for i in range(len(featureRange)):
# f = featureRange[i]
# legendList.append('Sequence layer, feature pool size: {}'.format(f))
# plt.plot(objectRange, accuracy[objectRange, f], color=colorList[i])
#
# plt.plot(objectRange, [100] * len(objectRange),
# color=colorList[len(featureRange)])
# legendList.append('Sensorimotor layer')
#
# # format
# plt.legend(legendList, bbox_to_anchor=(0., 0.6, 1., .102), loc="right", prop={'size':10})
# plt.xlabel("Number of objects")
# plt.ylim(-10.0, 110.0)
# plt.ylabel(yaxis)
# plt.title(title)
#
# # save
# plt.savefig(plotPath)
# plt.close()
def plotAccuracyAndMCsDuringDecrementChange(results, title="", yaxis=""):
"""
Plot accuracy vs decrement value
"""
decrementRange = []
mcRange = []
for r in results:
if r["basalPredictedSegmentDecrement"] not in decrementRange:
decrementRange.append(r["basalPredictedSegmentDecrement"])
if r["inputSize"] not in mcRange:
mcRange.append(r["inputSize"])
decrementRange.sort()
mcRange.sort()
print decrementRange
print mcRange
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# accuracy[o,f] = accuracy with o objects in training
# and f unique features.
accuracy = numpy.zeros((len(mcRange), len(decrementRange)))
TMAccuracy = numpy.zeros((len(mcRange), len(decrementRange)))
totals = numpy.zeros((len(mcRange), len(decrementRange)))
for r in results:
dec = r["basalPredictedSegmentDecrement"]
nf = r["inputSize"]
accuracy[mcRange.index(nf), decrementRange.index(dec)] += r["objectAccuracyL2"]
TMAccuracy[mcRange.index(nf), decrementRange.index(dec)] += r["sequenceCorrectClassificationsTM"]
totals[mcRange.index(nf), decrementRange.index(dec)] += 1
for i,f in enumerate(mcRange):
print i, f, accuracy[i] / totals[i]
print i, f, TMAccuracy[i] / totals[i]
print i, f, totals[i]
print
# ########################################################################
# #
# # Create the plot.
# plt.figure()
# plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
# "plots", "accuracy_during_sensorimotor_inference.pdf")
#
# # Plot each curve
# legendList = []
# colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
#
# for i in range(len(featureRange)):
# f = featureRange[i]
# legendList.append('Sequence layer, feature pool size: {}'.format(f))
# plt.plot(objectRange, accuracy[objectRange, f], color=colorList[i])
#
# plt.plot(objectRange, [100] * len(objectRange),
# color=colorList[len(featureRange)])
# legendList.append('Sensorimotor layer')
#
# # format
# plt.legend(legendList, bbox_to_anchor=(0., 0.6, 1., .102), loc="right", prop={'size':10})
# plt.xlabel("Number of objects")
# plt.ylim(-10.0, 110.0)
# plt.ylabel(yaxis)
# plt.title(title)
#
# # save
# plt.savefig(plotPath)
# plt.close()
def plotAccuracyDuringSequenceInference(dirName, title="", yaxis=""):
"""
Plot accuracy vs number of locations
"""
# Read in results file
with open(os.path.join(dirName,
"sequence_batch_high_dec_normal_features.pkl"), "rb") as f:
results = cPickle.load(f)
locationRange = []
featureRange = []
for r in results:
if r["numLocations"] not in locationRange: locationRange.append(r["numLocations"])
if r["numFeatures"] not in featureRange: featureRange.append(r["numFeatures"])
locationRange.sort()
featureRange.sort()
if 10 in featureRange: featureRange.remove(10)
print "locationRange=",locationRange
print "featureRange=",featureRange
########################################################################
#
# Accumulate the L2 accuracies for each condition in a list and compute mean
# and stdeviations
# For TM we average across all feature ranges
L2Accuracies = defaultdict(list)
TMAccuracies = defaultdict(list)
for r in results:
if r["numFeatures"] in featureRange:
L2Accuracies[(r["numLocations"], r["numFeatures"])].append(r["sequenceAccuracyL2"])
TMAccuracies[r["numLocations"]].append(r["sequenceCorrectSparsityTM"])
# meanAccuracy[o,f] = accuracy of TM with o objects and f unique features.
meanL2Accuracy = numpy.zeros((max(locationRange)+1, max(featureRange) + 1))
stdevL2 = numpy.zeros((max(locationRange)+1, max(featureRange) + 1))
meanTMAccuracy = numpy.zeros(max(locationRange)+1)
stdevTM = numpy.zeros(max(locationRange)+1)
for o in locationRange:
for f in featureRange:
a = numpy.array(L2Accuracies[(o, f)])
meanL2Accuracy[o, f] = 100.0*a.mean()
stdevL2[o, f] = 100.0*a.std()
# Accuracies for TM
a = numpy.array(TMAccuracies[o])
meanTMAccuracy[o] = 100.0*a.mean()
stdevTM[o] = 100.0*a.std()
########################################################################
#
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "accuracy_during_sequence_inference.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
legendList.append('Sensorimotor layer, feature pool size: {}'.format(f))
plt.errorbar(locationRange, meanL2Accuracy[locationRange, f],
yerr=stdevL2[locationRange, f],
color=colorList[i])
plt.errorbar(locationRange, meanTMAccuracy[locationRange],
yerr=stdevTM[locationRange],
color=colorList[len(featureRange)])
legendList.append('Temporal sequence layer')
# format
plt.legend(legendList, bbox_to_anchor=(0., 0.65, 1., .102), loc="right", prop={'size':10})
plt.xlabel("Size of location pool")
# plt.xticks(range(0,max(locationRange)+1,10))
# plt.yticks(range(0,int(accuracy.max())+2,10))
plt.ylim(-10.0, 110.0)
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close()
def plotAccuracyVsSequencesDuringSequenceInference(dirName, title="", yaxis=""):
# Read in results file
with open(os.path.join(dirName, "sequences_range_2048_mcs.pkl"), "rb") as f:
results = cPickle.load(f)
sequenceRange = []
featureRange = []
for r in results:
if r["numSequences"] not in sequenceRange: sequenceRange.append(r["numSequences"])
if r["numFeatures"] not in featureRange: featureRange.append(r["numFeatures"])
sequenceRange.sort()
featureRange.sort()
if 10 in featureRange: featureRange.remove(10)
print "numSequences=",sequenceRange
print "featureRange=",featureRange
########################################################################
#
# Accumulate the L2 accuracies for each condition in a list and compute mean
# and stdeviations
# For TM we average across all feature ranges
L2Accuracies = defaultdict(list)
TMAccuracies = defaultdict(list)
for r in results:
if r["numFeatures"] in featureRange:
L2Accuracies[(r["numSequences"], r["numFeatures"])].append(r["sequenceAccuracyL2"])
TMAccuracies[r["numSequences"]].append(r["sequenceCorrectSparsityTM"])
# meanAccuracy[o,f] = accuracy of TM with o objects and f unique features.
meanL2Accuracy = numpy.zeros((max(sequenceRange)+1, max(featureRange) + 1))
stdevL2 = numpy.zeros((max(sequenceRange)+1, max(featureRange) + 1))
meanTMAccuracy = numpy.zeros(max(sequenceRange)+1)
stdevTM = numpy.zeros(max(sequenceRange)+1)
for o in sequenceRange:
for f in featureRange:
a = numpy.array(L2Accuracies[(o, f)])
meanL2Accuracy[o, f] = 100.0*a.mean()
stdevL2[o, f] = 100.0*a.std()
# Accuracies for TM
a = numpy.array(TMAccuracies[o])
meanTMAccuracy[o] = 100.0*a.mean()
stdevTM[o] = 100.0*a.std()
########################################################################
#
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "accuracy_vs_sequences_2048_mcs.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
legendList.append('Sensorimotor layer, feature pool size: {}'.format(f))
plt.errorbar(sequenceRange, meanL2Accuracy[sequenceRange, f],
yerr=stdevL2[sequenceRange, f],
color=colorList[i])
plt.errorbar(sequenceRange, meanTMAccuracy[sequenceRange],
yerr=stdevTM[sequenceRange],
color=colorList[len(featureRange)])
legendList.append('Temporal sequence layer')
# format
plt.legend(legendList, bbox_to_anchor=(0., 0.65, 1., .102),
loc="right", prop={'size':10})
plt.xlabel("Number of sequences")
# plt.xticks(range(0,max(locationRange)+1,10))
# plt.yticks(range(0,int(accuracy.max())+2,10))
plt.ylim(-10.0, 110.0)
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close()
def gen4(dirName):
"""Plots 4A and 4B"""
# Generate images similar to those used in the first plot for the section
# "Simulations with Pure Temporal Sequences"
try:
resultsFig4A = os.path.join(dirName, "pure_sequences_example.pkl")
with open(resultsFig4A, "rb") as f:
results = cPickle.load(f)
for trialNum, stat in enumerate(results["statistics"]):
plotOneInferenceRun(
stat,
itemType="a single sequence",
fields=[
("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
("TM NextPredicted", "Predicted cells in temporal sequence layer"),
("TM PredictedActive",
"Predicted active cells in temporal sequence layer"),
],
basename="pure_sequences",
trialNumber=trialNum,
plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)),
"detailed_plots")
)
print "Plots for Fig 4A generated in 'detailed_plots'"
except Exception, e:
print "\nCould not generate plots for Fig 4A: "
traceback.print_exc()
print
# Generate the second plot for the section "Simulations with Pure
# Temporal Sequences"
try:
plotAccuracyDuringSequenceInference(
dirName,
title="Relative performance of layers while inferring temporal sequences",
yaxis="Accuracy (%)")
print "Plots for Fig 4B generated in 'plots'"
except Exception, e:
print "\nCould not generate plots for Fig 4B: "
traceback.print_exc()
print
# Generate the accuracy vs number of sequences
try:
plotAccuracyVsSequencesDuringSequenceInference(
dirName,
title="Relative performance of layers while inferring temporal sequences",
yaxis="Accuracy (%)")
print "Plots for Fig 4C generated in 'plots'"
except Exception, e:
print "\nCould not generate plots for Fig 4C: "
traceback.print_exc()
print
def gen5(dirName):
# Generate images similar to the first plot for the section "Simulations with
# Sensorimotor Sequences"
try:
resultsFig5A = os.path.join(dirName, "sensorimotor_sequence_example.pkl")
with open(resultsFig5A, "rb") as f:
results = cPickle.load(f)
for trialNum, stat in enumerate(results["statistics"]):
plotOneInferenceRun(
stat,
itemType="a single object",
fields=[
("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
("TM NextPredicted", "Predicted cells in temporal sequence layer"),
("TM PredictedActive",
"Predicted active cells in temporal sequence layer"),
],
basename="sensorimotor_sequences",
trialNumber=trialNum,
ymax=50,
plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)),
"detailed_plots")
)
print "Plots for Fig 5A generated in 'detailed_plots'"
except Exception, e:
print "\nCould not generate plots for Fig 5A: "
traceback.print_exc()
print
# Generate the second plot for the section "Simulations with Sensorimotor
# Sequences"
try:
resultsFig5B = os.path.join(dirName, "sensorimotor_batch_results_more_objects.pkl")
plotAccuracyDuringSensorimotorInference(
resultsFig5B,
title="Relative performance of layers during sensorimotor inference",
yaxis="Accuracy (%)")
print "Plots for Fig 5B generated in 'plots'"
except Exception, e:
print "\nCould not generate plots for Fig 5B: "
traceback.print_exc()
print
def gen6(dirName):
# Generate a plot similar to one in the section "Simulations with Combined
# Sequences". Note that the dashed vertical lines and labels were added in
# manually.
try:
resultsFig6 = os.path.join(dirName, "combined_results.pkl")
# resultsFig6 = os.path.join(dirName, "superimposed_sequence_results.pkl")
if os.path.exists(resultsFig6):
with open(resultsFig6, "rb") as f:
results = cPickle.load(f)
plotMultipleInferenceRun(
results["statistics"][0:10],
fields=[
("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
("TM PredictedActive",
"Predicted active cells in temporal sequence layer"),
],
basename=results["name"],
plotDir=os.path.join(dirName, "plots")
)
print "Plots for Fig 6 generated in 'plots'"
except Exception, e:
print "\nCould not generate plots for Fig 6: "
traceback.print_exc()
print
if __name__ == "__main__":
dirName = os.path.dirname(os.path.realpath(__file__))
parser = OptionParser("python %prog [-h]\n\n"
"Regenerate the plots for every figure, if the "
"appropriate pkl file exists.")
options, args = parser.parse_args(sys.argv[1:])
gen4(dirName)
# gen5(dirName)
# gen6(dirName)
# Generate performance as a function of decrements
# try:
# for fn in [
# # "superimposed_more_increments_500_features.pkl",
# "superimposed_pool_increments_varying_features.pkl",
# "superimposed_more_increments_1000_features.pkl",
# "superimposed_more_increments_varying_features.pkl",
# "superimposed_more_increments_50_features.pkl",
# "superimposed_smaller_mcs.pkl",
# ]:
# # resultsFile = os.path.join(dirName, "superimposed_pool_increments_stripped.pkl")
# resultsFile = os.path.join(dirName, fn)
# print "\n\nFile: ",fn
#
# # Analyze results
# with open(resultsFile, "rb") as f:
# results = cPickle.load(f)
#
# plotAccuracyDuringDecrementChange(results)
#
# # print "Plots for decrements generated in 'plots'"
# except Exception, e:
# print "\nCould not generate plots for decrements: "
# traceback.print_exc()
# print
# Generate performance as a function of minicolumns
# try:
# for fn in [
# "superimposed_range_of_mcs.pkl",
# ]:
# resultsFile = os.path.join(dirName, fn)
# print "\n\nFile: ",fn
#
# # Analyze results
# with open(resultsFile, "rb") as f:
# results = cPickle.load(f)
#
# plotAccuracyAndMCsDuringDecrementChange(results)
#
# # print "Plots for decrements generated in 'plots'"
# except Exception, e:
# print "\nCould not generate plots for decrements: "
# traceback.print_exc()
# print
| gpl-3.0 |
nicholasding/pythonml | document-classification/test.py | 1 | 3458 | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn import metrics
from optparse import OptionParser
import sys
import json
class DataSource(object):
"""
Attributes:
- data
- target
- target_names
"""
def __init__(self, json_file):
self.target_index = 0
self.target_mapping = {}
self.target_names = []
data = []
target = []
with open(json_file, 'r') as fp:
for line in fp:
i = json.loads(line)
data.append(i['X'])
target.append(self.mapping_target(i['y']))
self.data = data
self.target = target
def mapping_target(self, label):
if label not in self.target_mapping:
self.target_names.append(label)
self.target_mapping[label] = self.target_index
self.target_index += 1
return self.target_mapping[label]
CLASSIFIERS = {
'svm': LinearSVC(),
'sgd': SGDClassifier(alpha=.0001, n_iter=50, penalty='l2'),
'nb': MultinomialNB(alpha=.01),
}
def classifier(name):
if name in CLASSIFIERS: return CLASSIFIERS[name]
return CLASSIFIERS['sgd']
def train_and_test(opts):
data_train = DataSource(opts.training_file)
data_test = DataSource(opts.testing_file)
# Train
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True, n_features=2 ** 16)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
y_train = data_train.target
print("Training: n_samples: %d, n_features: %d" % X_train.shape)
clf = classifier(opts.classifier)
clf.fit(X_train, y_train)
# Test
X_test = vectorizer.transform(data_test.data)
y_test = data_test.target
print("Testing: n_samples: %d, n_features: %d" % X_test.shape)
# Metrics
pred = clf.predict(X_test)
score = metrics.f1_score(y_test, pred)
print('F1 score: %.3f' % score)
print(metrics.classification_report(y_test, pred, target_names=data_train.target_names))
# Save model
if opts.model_file:
from sklearn.externals import joblib
model = [vectorizer, clf, data_train.target_names]
joblib.dump(model, opts.model_file, compress=9)
print('Saved model to %s' % opts.model_file)
def main():
op = OptionParser()
op.add_option('-i', '--training_file', dest='training_file', action='store', help='JSON file for training')
op.add_option('-t', '--testing_file', dest='testing_file', action='store', help='JSON file for testing')
op.add_option('-c', '--classifier', dest='classifier', action='store', help='Classifier, options (svm, sgd, nb)')
op.add_option('-s', '--save_model', dest='model_file', action='store', help='Save model into a file')
op.add_option('--use_hashing', dest='use_hashing', action='store_true', help='Use feature hashing')
(opts, args) = op.parse_args()
if not (opts.training_file and opts.testing_file):
op.print_help()
else:
train_and_test(opts)
if __name__ == '__main__':
main()
| mit |
williamalu/mimo_usrp | scripts/channel_trimmer.py | 1 | 2788 | #!/usr/bin/env python
""" Class for creating trimmed received noise files to estimate H
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
class Trimmer(object):
data_path = "../data/"
@staticmethod
def trim_both(fname, output_name, noise_length=100000, gap=10000, offset=10):
""" Writes two files that each contain one of the two trimmed blocks
of received noise
Parameters
----------
fname : str
name of the binary file to be trimmed, without file extension
noise_length : int
length of the noise block, in number of samples
gap : int
length of the gap between noise blocks, in number of samples
offset : int
number of samples used to accurately tune finding the blocks
"""
received = np.fromfile(Trimmer.data_path+fname+".bin",
dtype=np.complex64)
rec_length = range(len(received))
rec_ampl = np.absolute(received)
noise_ampl = np.amax(rec_ampl[:200000])
beg1 = np.argmax(rec_ampl>3*noise_ampl)+offset
end1 = beg1 + noise_length
beg2 = end1 + gap
end2 = beg2 + noise_length
plt.subplot(2,1,1)
plt.plot(rec_length[beg1-gap:end1+gap], rec_ampl[beg1-gap:end1+gap],
'.', ms=2, label="received")
plt.plot(rec_length[beg1:end1], rec_ampl[beg1:end1],
'.', ms=2, label="first")
plt.title("FIRST")
plt.subplot(2,1,2)
plt.plot(rec_length[beg2-gap:end2+gap], rec_ampl[beg2-gap:end2+gap],
'.', ms=2, label="received")
plt.plot(rec_length[beg2:end2], rec_ampl[beg2:end2],
'.', ms=2, label="second")
plt.title("SECOND")
plt.show()
Trimmer.write_trimmed(output_name, received[beg1:end1], received[beg2:end2])
@staticmethod
def write_trimmed(output_name, first, second):
""" Writes two binary complex64 files
Parametersc
----------
fname : str
base name of the file to write
first : ndarray
the first complex array to write to a file
second : ndarray
the second complex array to write to a file
"""
output_file = open(Trimmer.data_path+output_name+"1.bin", 'wb')
output_file.write(first.tobytes())
output_file.close()
output_file = open(Trimmer.data_path+output_name+"2.bin", 'wb')
output_file.write(second.tobytes())
output_file.close()
if __name__ == "__main__":
Trimmer.trim_both("recnoise1", output_name="noise_h1", offset=19)
Trimmer.trim_both("recnoise2", output_name="noise_h2", offset=19)
| mit |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/Hartman3D/MHDfluid.py | 1 | 16357 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
import mshr
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDprec as MHDpreconditioner
import memory_profiler
import gc
import MHDmulti
import MHDmatrixSetup as MHDsetup
import HartmanChannel
#@profile
m = 6
set_log_active(False)
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
def polarr(u, x, y):
return (1./sqrt(x**2 + y**2))*(x*sy.diff(u,x)+y*sy.diff(u,y))
def polart(u, x, y):
return -y*sy.diff(u,x)+x*sy.diff(u,y)
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx+1
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
L = 10.
y0 = 2.
z0 = 1.
mesh, boundaries, domains = HartmanChannel.Domain(nn, L, y0, z0)
# set_log_level(WARNING)
parameters['form_compiler']['quadrature_degree'] = -1
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
VecPressure = VectorFunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order-1 )
Lagrange = FunctionSpace(mesh, "CG", order-1 )
W = MixedFunctionSpace([Velocity, Pressure, Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
kappa = 1.0
Mu_m =float(1e4)
MU = 1.0
N = FacetNormal(mesh)
ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
# g = inner(p0*N - MU*grad(u0)*N,v)*dx
IterType = 'Full'
Split = "No"
Saddle = "No"
Stokes = "No"
SetupType = 'python-class'
# F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
# if kappa == 0:
# F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
# else:
# F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
F_M = Expression(("0.0","0.0","0.0"))
F_S = Expression(("0.0","0.0","0.0"))
n = FacetNormal(mesh)
trunc = 4
u0, b0, pN, pN2 = HartmanChannel.ExactSol(mesh, params, y0, z0, trunc)
b = Expression(("0.0","1.0","0.0"))
r0 = Expression(("0.0"))
# pN = -pN
# u_k = Function(Velocity)
# p_k = Function(Pressure)
# b_k = Function(Magnetic)
# r_k = Function(Lagrange)
MO.PrintStr("Seting up initial guess matricies",2,"=","\n\n","\n")
BCtime = time.time()
BC = MHDsetup.BoundaryIndices(mesh)
MO.StrTimePrint("BC index function, time: ", time.time()-BCtime)
Hiptmairtol = 1e-6
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b, r0, Hiptmairtol, params)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
# u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-10,Neumann=None,options ="New")
u_k, p_k = HartmanChannel.Stokes(Velocity, Pressure, F_S, u0, pN2, params, boundaries, domains)
b_k, r_k = HartmanChannel.Maxwell(Magnetic, Lagrange, F_M, b0, r0, params, boundaries,HiptmairMatrices, Hiptmairtol)
(u, p, b, r) = TrialFunctions(W)
(v, q, c, s) = TestFunctions(W)
m11 = params[1]*params[0]*inner(curl(b),curl(c))*dx('everywhere')
m21 = inner(c,grad(r))*dx('everywhere')
m12 = inner(b,grad(s))*dx('everywhere')
a11 = params[2]*inner(grad(v), grad(u))*dx('everywhere') + inner((grad(u)*u_k),v)*dx('everywhere') + (1./2)*div(u_k)*inner(u,v)*dx('everywhere') - (1./2)*inner(u_k,n)*inner(u,v)*ds
a12 = -div(v)*p*dx('everywhere')
a21 = -div(u)*q*dx('everywhere')
CoupleT = params[0]*inner(cross(v,b_k),curl(b))*dx('everywhere')
Couple = -params[0]*inner(cross(u,b_k),curl(c))*dx('everywhere')
a = m11 + m12 + m21 + a11 + a21 + a12 + Couple + CoupleT
Lns = inner(v, F_S)*dx('everywhere') #+ 0*inner(pN*n,v)*ds(1)
Lmaxwell = inner(c, F_M)*dx('everywhere')
m11 = params[1]*params[0]*inner(curl(b_k),curl(c))*dx('everywhere')
m21 = inner(c,grad(r_k))*dx('everywhere')
m12 = inner(b_k,grad(s))*dx('everywhere')
a11 = params[2]*inner(grad(v), grad(u_k))*dx('everywhere') + inner((grad(u_k)*u_k),v)*dx('everywhere') + (1./2)*div(u_k)*inner(u_k,v)*dx('everywhere') - (1./2)*inner(u_k,n)*inner(u_k,v)*ds
a12 = -div(v)*p_k*dx('everywhere')
a21 = -div(u_k)*q*dx('everywhere')
CoupleT = params[0]*inner(cross(v,b_k),curl(b_k))*dx('everywhere')
Couple = -params[0]*inner(cross(u_k,b_k),curl(c))*dx('everywhere')
L = Lns + Lmaxwell - (m11 + m12 + m21 + a11 + a21 + a12 + Couple + CoupleT)
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
pConst = - assemble(p_k*dx('everywhere'))/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU, mesh, boundaries, domains)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k, mesh, boundaries, domains)
#plot(b_k)
# ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"CG",Saddle,Stokes)
# RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"CG",Saddle,Stokes)
# bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundaries, 1)
# bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0")), boundaries, 1)
# bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundaries, 1)
# bcs = [bcu,bcb,bcr]
IS = MO.IndexSet(W, 'Blocks')
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 10 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
# FSpaces = [Velocity,Magnetic,Pressure,Lagrange]
u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-5
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
# if iter == 1:
# bcu = DirichletBC(W.sub(0),u0, boundaries, 1)
# bcb = DirichletBC(W.sub(2),b0, boundaries, 1)
# bcr = DirichletBC(W.sub(3),r0, boundaries, 1)
# bcs = [bcu,bcb,bcr]
# else:
# bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0","0.0")), boundaries, 2)
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression("0.0"), boundary)
bcs = [bcu,bcb,bcr]
# if iter == 1:
# , L
A, b = assemble_system(a, L, bcs)
# AA = assemble(a)
# bb = assemble(L)
# for bc in bcs:
# bc.apply(AA,bb)
# print A.sparray().todense()
# MO.StoreMatrix(A.sparray(),'name')
A, b = CP.Assemble(A,b)
u = b.duplicate()
print " Max rhs = ",np.max(b.array)
# ssss
# L = assemble(L)
# print L.array()
# for bc in bcs:
# bc.apply(L)
# print L.array()
# MO.StrTimePrint("MHD total assemble, time: ", time.time()-AssembleTime)
# u = b.duplicate()
# kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
# print "Inititial guess norm: ", u.norm(PETSc.NormType.NORM_INFINITY)
# #A,Q
n = FacetNormal(mesh)
b_t = TrialFunction(Velocity)
c_t = TestFunction(Velocity)
mat = as_matrix([[b_k[2]*b_k[2]+b[1]*b[1],-b_k[1]*b_k[0],-b_k[0]*b_k[2]],
[-b_k[1]*b_k[0],b_k[0]*b_k[0]+b_k[2]*b_k[2],-b_k[2]*b_k[1]],
[-b_k[0]*b_k[2],-b_k[1]*b_k[2],b_k[0]*b_k[0]+b_k[1]*b_k[1]]])
aa = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1./2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1./2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
ShiftedMass = assemble(aa)
bcu.apply(ShiftedMass)
ShiftedMass = CP.Assemble(ShiftedMass)
kspF = NSprecondSetup.LSCKSPnonlinear(ShiftedMass)
stime = time.time()
u, mits,nsits = S.solve(A,b,u,params,W,'Directss',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF)
Soltime = time.time() - stime
MO.StrTimePrint("MHD solve, time: ", Soltime)
Mits += mits
NSits += nsits
SolutionTime += Soltime
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()]
# u0, p0, b0, r0, F_NS, F_M, F_MX, F_S, gradu0, Neumann, p0vec = Lshaped.Solution2(mesh, params)
# Vel = plot(u_k, prefix='velocityApprox')
# Vel.write_png()
# Vel = plot(interpolate(u0,Velocity), prefix='velocityExact')
# Vel.write_png()
#
# Vel = plot(p_k, prefix='pressureApprox')
# Vel.write_png()
# Vel = plot(interpolate(pN,Pressure), prefix='pressureExact')
# Vel.write_png()
ExactSolution = [u0,pN,b0,r0]
errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(XX,mesh,FSpaces,ExactSolution,order,dim, "CG")
print float(Wdim[xx-1][0])/Wdim[xx-2][0]
if xx > 1:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1])/np.log2((float(Velocitydim[xx-1][0])/Velocitydim[xx-2][0])**(1./3)))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1])/np.log2((float(Velocitydim[xx-1][0])/Velocitydim[xx-2][0])**(1./3)))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1])/np.log2((float(Pressuredim[xx-1][0])/Pressuredim[xx-2][0])**(1./3)))
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1])/np.log2((float(Magneticdim[xx-1][0])/Magneticdim[xx-2][0])**(1./3)))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1])/np.log2((float(Magneticdim[xx-1][0])/Magneticdim[xx-2][0])**(1./3)))
l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1])/np.log2((float(Lagrangedim[xx-1][0])/Lagrangedim[xx-2][0])**(1./3)))
H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1])/np.log2((float(Lagrangedim[xx-1][0])/Lagrangedim[xx-2][0])**(1./3)))
import pandas as pd
LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
pd.set_option('precision',3)
LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
print LatexTable.to_latex()
print "\n\n Magnetic convergence"
MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
pd.set_option('precision',3)
MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
print MagneticTable.to_latex()
print "\n\n Lagrange convergence"
LagrangeTitles = ["l","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
LagrangeValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
pd.set_option('precision',3)
LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,"L2-order","%1.2f")
LagrangeTable = MO.PandasFormat(LagrangeTable,'H1-order',"%1.2f")
print LagrangeTable.to_latex()
import pandas as pd
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable
# print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
# tableName = "2d_Lshaped_nu="+str(MU)+"_nu_m="+str(Mu_m)+"_kappa="+str(kappa)+"_l="+str(np.min(level))+"-"+str(np.max(level))+"Approx.tex"
# IterTable.to_latex(tableName)
# # # if (ShowResultPlots == 'yes'):
# plot(interpolate(u0,Velocity))
#
# u = plot(interpolate(u0,Velocity))
# p = plot(interpolate(pN2,Pressure))
# b = plot(interpolate(b0,Magnetic))
# u.write_png()
# p.write_png()
# b.write_png()
# u = plot(u_k)
# p = plot(p_k)
# b = plot(b_k)
# u.write_png()
# p.write_png()
# b.write_png()
#
# plot(interpolate(p0,Pressure))
#
# plot(interpolate(b0,Magnetic))
#
# plot(r_k)
# plot(interpolate(r0,Lagrange))
#
# interactive()
interactive()
| mit |
altairpearl/scikit-learn | sklearn/utils/tests/test_multiclass.py | 6 | 13417 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
rgommers/statsmodels | examples/python/generic_mle.py | 33 | 7532 |
## Maximum Likelihood Estimation (Generic models)
# This tutorial explains how to quickly implement new maximum likelihood models in `statsmodels`. We give two examples:
#
# 1. Probit model for binary dependent variables
# 2. Negative binomial model for count data
#
# The `GenericLikelihoodModel` class eases the process by providing tools such as automatic numeric differentiation and a unified interface to ``scipy`` optimization functions. Using ``statsmodels``, users can fit new MLE models simply by "plugging-in" a log-likelihood function.
# ## Example 1: Probit model
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
# The ``Spector`` dataset is distributed with ``statsmodels``. You can access a vector of values for the dependent variable (``endog``) and a matrix of regressors (``exog``) like this:
data = sm.datasets.spector.load_pandas()
exog = data.exog
endog = data.endog
print(sm.datasets.spector.NOTE)
print(data.exog.head())
# Them, we add a constant to the matrix of regressors:
exog = sm.add_constant(exog, prepend=True)
# To create your own Likelihood Model, you simply need to overwrite the loglike method.
class MyProbit(GenericLikelihoodModel):
def loglike(self, params):
exog = self.exog
endog = self.endog
q = 2 * endog - 1
return stats.norm.logcdf(q*np.dot(exog, params)).sum()
# Estimate the model and print(a summary:
sm_probit_manual = MyProbit(endog, exog).fit()
print(sm_probit_manual.summary())
# Compare your Probit implementation to ``statsmodels``' "canned" implementation:
sm_probit_canned = sm.Probit(endog, exog).fit()
print(sm_probit_canned.params)
print(sm_probit_manual.params)
print(sm_probit_canned.cov_params())
print(sm_probit_manual.cov_params())
# Notice that the ``GenericMaximumLikelihood`` class provides automatic differentiation, so we didn't have to provide Hessian or Score functions in order to calculate the covariance estimates.
#
#
# ## Example 2: Negative Binomial Regression for Count Data
#
# Consider a negative binomial regression model for count data with
# log-likelihood (type NB-2) function expressed as:
#
# $$
# \mathcal{L}(\beta_j; y, \alpha) = \sum_{i=1}^n y_i ln
# \left ( \frac{\alpha exp(X_i'\beta)}{1+\alpha exp(X_i'\beta)} \right ) -
# \frac{1}{\alpha} ln(1+\alpha exp(X_i'\beta)) + ln \Gamma (y_i + 1/\alpha) - ln \Gamma (y_i+1) - ln \Gamma (1/\alpha)
# $$
#
# with a matrix of regressors $X$, a vector of coefficients $\beta$,
# and the negative binomial heterogeneity parameter $\alpha$.
#
# Using the ``nbinom`` distribution from ``scipy``, we can write this likelihood
# simply as:
#
import numpy as np
from scipy.stats import nbinom
def _ll_nb2(y, X, beta, alph):
mu = np.exp(np.dot(X, beta))
size = 1/alph
prob = size/(size+mu)
ll = nbinom.logpmf(y, size, prob)
return ll
# ### New Model Class
#
# We create a new model class which inherits from ``GenericLikelihoodModel``:
from statsmodels.base.model import GenericLikelihoodModel
class NBin(GenericLikelihoodModel):
def __init__(self, endog, exog, **kwds):
super(NBin, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
alph = params[-1]
beta = params[:-1]
ll = _ll_nb2(self.endog, self.exog, beta, alph)
return -ll
def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
# we have one additional parameter and we need to add it for summary
self.exog_names.append('alpha')
if start_params == None:
# Reasonable starting values
start_params = np.append(np.zeros(self.exog.shape[1]), .5)
# intercept
start_params[-2] = np.log(self.endog.mean())
return super(NBin, self).fit(start_params=start_params,
maxiter=maxiter, maxfun=maxfun,
**kwds)
# Two important things to notice:
#
# + ``nloglikeobs``: This function should return one evaluation of the negative log-likelihood function per observation in your dataset (i.e. rows of the endog/X matrix).
# + ``start_params``: A one-dimensional array of starting values needs to be provided. The size of this array determines the number of parameters that will be used in optimization.
#
# That's it! You're done!
#
# ### Usage Example
#
# The [Medpar](http://vincentarelbundock.github.com/Rdatasets/doc/COUNT/medpar.html)
# dataset is hosted in CSV format at the [Rdatasets repository](http://vincentarelbundock.github.com/Rdatasets). We use the ``read_csv``
# function from the [Pandas library](http://pandas.pydata.org) to load the data
# in memory. We then print(the first few columns:
#
import statsmodels.api as sm
medpar = sm.datasets.get_rdataset("medpar", "COUNT", cache=True).data
medpar.head()
# The model we are interested in has a vector of non-negative integers as
# dependent variable (``los``), and 5 regressors: ``Intercept``, ``type2``,
# ``type3``, ``hmo``, ``white``.
#
# For estimation, we need to create two variables to hold our regressors and the outcome variable. These can be ndarrays or pandas objects.
y = medpar.los
X = medpar[["type2", "type3", "hmo", "white"]]
X["constant"] = 1
# Then, we fit the model and extract some information:
mod = NBin(y, X)
res = mod.fit()
# Extract parameter estimates, standard errors, p-values, AIC, etc.:
print('Parameters: ', res.params)
print('Standard errors: ', res.bse)
print('P-values: ', res.pvalues)
print('AIC: ', res.aic)
# As usual, you can obtain a full list of available information by typing
# ``dir(res)``.
# We can also look at the summary of the estimation results.
print(res.summary())
# ### Testing
# We can check the results by using the statsmodels implementation of the Negative Binomial model, which uses the analytic score function and Hessian.
res_nbin = sm.NegativeBinomial(y, X).fit(disp=0)
print(res_nbin.summary())
print(res_nbin.params)
print(res_nbin.bse)
# Or we could compare them to results obtained using the MASS implementation for R:
#
# url = 'http://vincentarelbundock.github.com/Rdatasets/csv/COUNT/medpar.csv'
# medpar = read.csv(url)
# f = los~factor(type)+hmo+white
#
# library(MASS)
# mod = glm.nb(f, medpar)
# coef(summary(mod))
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 2.31027893 0.06744676 34.253370 3.885556e-257
# factor(type)2 0.22124898 0.05045746 4.384861 1.160597e-05
# factor(type)3 0.70615882 0.07599849 9.291748 1.517751e-20
# hmo -0.06795522 0.05321375 -1.277024 2.015939e-01
# white -0.12906544 0.06836272 -1.887951 5.903257e-02
#
# ### Numerical precision
#
# The ``statsmodels`` generic MLE and ``R`` parameter estimates agree up to the fourth decimal. The standard errors, however, agree only up to the second decimal. This discrepancy is the result of imprecision in our Hessian numerical estimates. In the current context, the difference between ``MASS`` and ``statsmodels`` standard error estimates is substantively irrelevant, but it highlights the fact that users who need very precise estimates may not always want to rely on default settings when using numerical derivatives. In such cases, it is better to use analytical derivatives with the ``LikelihoodModel`` class.
#
| bsd-3-clause |
awickert/river-network-evolution | backup/OneChannel.py | 2 | 4152 | import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve, isolve
from matplotlib import pyplot as plt
def sediment__discharge_per_unit_width(D, h, z, x):
"""
Compute q_s as a function of the bed elevations, grain size, and flow depth.
This utility exists because the equation solved by the main code combines
MPM and Exner into a nonlinear diffusion equation and doesn't explicitly give
transport rates
Wang and Parker (2006) version of MPM (1948)
Normal flow assumptions: depth--slope product
q_s = 7.55 * ( np.abs(h[1:-1]/D * S) - 0.0816 )**1.5
with corrections for values below the threshold of motion
and for sdiment motion that is backwards.
S = -dz_dx
"""
S = (z[2:] - z[:-2]) / (x[2:] - x[:-2]) # = -dz_dx
q_s_inner = np.abs(h[1:-1]/D * S) - 0.0816
q_s_inner[q_s_inner < 0] = 0 # no transport if less than threshold
q_s = np.sign(q_s_inner) * 7.55 * q_s_inner**1.5
return q_s
def transport__slope(D, h, q_s):
"""
This transport slope, or d(eta)/dx, is the slope required for a certain
sediment discharge per unit channel width.
This is a utility to create the ghost nodes for the Neumann boundary
condition.
It will be returned positive, even though it is a negative dz/dx
S_t = -0.26 * D/h * (q_s**(2./3.) + 0.314) --> +0.26...
"""
S_t = np.sign(q_s) * 0.26 * D/h * (np.abs(q_s)**(2./3.) + 0.314)
return S_t
D = 15E-3 # [m]
porosity = lambda_p = 0.35 # [-]
nx = 11
h = 2. * np.ones(nx)
B = 100 * np.ones(nx)
x = np.linspace(0, 1E6, nx)
dx = np.mean(np.diff(x))
eta = -1E-3 * x + np.max(x)*1E-3
eta = np.round(eta, 6) # coarse trick to rmv floating point issues
t = np.linspace(0, 10, 11) # start at 1 below, t0 is initial
A0 = 11.325 / (1 - lambda_p) * h/D
#q_s_in = 0.69623693 # [m^3 s^{-1}]
q_s_in = sediment__discharge_per_unit_width(D, h, eta, x)[0]
#q_s_out = whatever it has to be to transport out as much material as it receives
S_t_in = transport__slope(D, h[0], q_s_in)
S_t_out = transport__slope(D, h[0], q_s_in*.1)
dt = 3.15E9
print np.mean(eta)
for t in range(10):
#S_t_out = -(eta[-1] - eta[-3])/(2*dx)
etatmp = eta.copy() # for iterating
eta_with_ghost = np.hstack((eta[1] + S_t_in*2*dx, eta, eta[-2] - S_t_out*2*dx))
deta = eta_with_ghost[2:] - eta_with_ghost[:-2]
for i in range(3):
# etatmp used to update coefficient: this is the nonlinearity that
# requires iteration
###################################################################
etatmp_with_ghost = np.hstack((etatmp[1] + S_t_in*2*dx, etatmp, etatmp[-2] - S_t_out*2*dx))
detatmp_dx = (etatmp_with_ghost[2:] - etatmp_with_ghost[:-2]) / (2*dx)
#A1 = (- ( (h/D) * detatmp_dx ) - 0.0816)**.5
# HAVE TO CHECK ABS TO LET UPSTREAM QS HAPPEN
A1_inside_inside = -h*detatmp_dx/D # - because MPM has slope down positive
A1_inside = np.abs(h*detatmp_dx/D) - 0.0816
A1_inside[A1_inside < 0] = 0 # no transport
print A1_inside
A1 = np.sign(A1_inside) * (A1_inside)**0.5
# Minus for flipping eta(t) and eta(t+1)
A = - A0 * A1
#A = 0*A+1 # Making A linear for the moment -- none of the above matters!!!
Adt = A * dt
# upstream on left -- becomes on top w/ multiplication
l1 = Adt/dx**2
c0 = -2*Adt/dx**2 + 1 # +1 for eta(t+1)
r1 = Adt/dx**2
r1[0] += l1[0]
l1[-1] += r1[-1]
# RHS = B
RHS = eta.copy() # (eta(t))
RHS[0] += Adt[0] * (eta_with_ghost[2] - eta_with_ghost[0]) / (dx**2)
RHS[-1] -= Adt[-1] * (eta_with_ghost[-1] - eta_with_ghost[-3]) / (dx**2)
# Now populate tridiagonal
l1 = np.roll(l1, -1)
r1 = np.roll(r1, 1)
diags = np.vstack((l1,c0,r1))
offsets = np.array([-1,0,1])
coeff_matrix = spdiags(diags, offsets, nx, nx, format='csr')
# Eventually have to use this for iteration
#etatmp = spsolve(coeff_matrix, RHS, use_umfpack=True)
# round = coarse trick to rmv floating point issues
etatmp = spsolve(coeff_matrix, RHS, use_umfpack=True)
#etatmp[1:-1] = coeff_matrix * eta[1:-1]
#print etatmp[-1]
#print ""
eta = etatmp.copy()
print np.mean(eta)
plt.ion()
plt.plot(eta)
plt.show()
| gpl-3.0 |
xunilrj/sandbox | courses/MITx/MITx 6.86x Machine Learning with Python-From Linear Models to Deep Learning/project2/mnist/utils.py | 5 | 2533 | import pickle, gzip, numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
def plot_images(X):
if X.ndim == 1:
X = np.array([X])
num_images = X.shape[0]
num_rows = math.floor(math.sqrt(num_images))
num_cols = math.ceil(num_images/num_rows)
for i in range(num_images):
reshaped_image = X[i,:].reshape(28,28)
plt.subplot(num_rows, num_cols, i+1)
plt.imshow(reshaped_image, cmap = cm.Greys_r)
plt.axis('off')
plt.show()
def pick_examples_of(X, Y, labels, total_count):
bool_arr = None
for label in labels:
bool_arr_for_label = (Y == label)
if bool_arr is None:
bool_arr = bool_arr_for_label
else:
bool_arr |= bool_arr_for_label
filtered_x = X[bool_arr]
filtered_y = Y[bool_arr]
return (filtered_x[:total_count], filtered_y[:total_count])
def extract_training_and_test_examples_with_labels(train_x, train_y, test_x, test_y, labels, training_count, test_count):
filtered_train_x, filtered_train_y = pick_examples_of(train_x, train_y, labels, training_count)
filtered_test_x, filtered_test_y = pick_examples_of(test_x, test_y, labels, test_count)
return (filtered_train_x, filtered_train_y, filtered_test_x, filtered_test_y)
def write_pickle_data(data, file_name):
f = gzip.open(file_name, 'wb')
pickle.dump(data, f)
f.close()
def read_pickle_data(file_name):
f = gzip.open(file_name, 'rb')
data = pickle.load(f, encoding='latin1')
f.close()
return data
def get_MNIST_data():
"""
Reads mnist dataset from file
Returns:
train_x - 2D Numpy array (n, d) where each row is an image
train_y - 1D Numpy array (n, ) where each row is a label
test_x - 2D Numpy array (n, d) where each row is an image
test_y - 1D Numpy array (n, ) where each row is a label
"""
train_set, valid_set, test_set = read_pickle_data('../Datasets/mnist.pkl.gz')
train_x, train_y = train_set
valid_x, valid_y = valid_set
train_x = np.vstack((train_x, valid_x))
train_y = np.append(train_y, valid_y)
test_x, test_y = test_set
return (train_x, train_y, test_x, test_y)
def load_train_and_test_pickle(file_name):
train_x, train_y, test_x, test_y = read_pickle_data(file_name)
return train_x, train_y, test_x, test_y
# returns the feature set in a numpy ndarray
def load_CSV(filename):
stuff = np.asarray(np.loadtxt(open(filename, 'rb'), delimiter=','))
return stuff
| apache-2.0 |
dingocuster/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes can let any indexable datastructure pass-through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
maxlikely/scikit-learn | benchmarks/bench_plot_nmf.py | 1 | 5717 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
import gc
from time import time
import numpy as np
from collections import defaultdict
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 10000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R == None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if True or (i % 10) == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def compute_bench(samples_range, features_range, rank=50, tolerance=1e-7):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benching nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
print(m.reconstruction_err_, tend)
gc.collect()
print "benching nndsvda-nmf: "
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
print(m.reconstruction_err_, tend)
gc.collect()
print("benching nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
print(m.reconstruction_err_, tend)
gc.collect()
print("benching random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
print(m.reconstruction_err_, tend)
gc.collect()
print("benching alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
print(np.linalg.norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = compute_bench(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure()
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
enriquecoronadozu/HMPy | src/borrar/test/newmodel2.py | 2 | 7258 | #!/usr/bin/env python
"""@See preprocessed data
"""
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from matplotlib.path import Path
import matplotlib.patches as patches
from numpy import*
from numpy.linalg import*
from scipy import interpolate
from scipy.signal import filtfilt, lfilter
from scipy.signal import medfilt
from scipy.signal import filter_design as ifd
from scipy.stats import multivariate_normal
import scipy.spatial
import numpy as np
from scipy.stats import multivariate_normal
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
from sklearn.mixture import GMM
from GestureModel import*
from Creator import*
from Classifier import*
def plotResults(gr_points,gr_sig, b_points,b_sig,name_model):
from scipy import linalg
import matplotlib.pyplot as plt
gr_points = gr_points.transpose()
b_points = b_points.transpose()
gr_sigma = []
b_sigma = []
n,m = gr_points.shape
maximum = zeros((m))
minimum = zeros((m))
x = arange(0,m,1)
for i in range(m):
gr_sigma.append(gr_sig[i*3:i*3+3])
b_sigma.append(b_sig[i*3:i*3+3])
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[0,i]+ sigma[0,0];
minimum[i] = gr_points[0,i]- sigma[0,0];
fig2 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[0])
plt.savefig(name_model+ "_gravity_x_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[1,i]+ sigma[1,1];
minimum[i] = gr_points[1,i]- sigma[1,1];
fig3 = plt.figure()
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[1])
plt.savefig(name_model+ "_gravity_y_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(gr_sigma[i])
maximum[i] = gr_points[2,i]+ sigma[2,2];
minimum[i] = gr_points[2,i]- sigma[2,2];
fig3 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, gr_points[2])
plt.savefig(name_model+ "_gravity_z_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[0,i]+ sigma[0,0];
minimum[i] = b_points[0,i]- sigma[0,0];
fig4 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[0])
plt.savefig(name_model+ "_body_x_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[1,i]+ sigma[1,1];
minimum[i] = b_points[1,i]- sigma[1,1];
fig5 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[1])
plt.savefig(name_model+ "_body_axis.png")
for i in range(m):
sigma = 3.*linalg.sqrtm(b_sigma[i])
maximum[i] = b_points[2,i]+ sigma[2,2];
minimum[i] = b_points[2,i]- sigma[2,2];
fig6 = plt.figure()
import matplotlib.pyplot as plt
plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 )
plt.plot(x, b_points[2])
plt.savefig(name_model+ "_body_z_axis.png")
def newModel(name,list_files,th=1):
g = Creator()
#Read the data
g.ReadFiles(list_files,[])
g.CreateDatasets_Acc()
g.ObtainNumberOfCluster()
gravity = g.gravity
K_gravity = g.K_gravity
body = g.body
K_body = g.K_body
# 2) define the number of points to be used in GMR
# (current settings allow for CONSTANT SPACING only)
numPoints = amax(gravity[0,:]);
scaling_factor = 10/10;
numGMRPoints = math.ceil(numPoints*scaling_factor);
# 3) perform Gaussian Mixture Modelling and Regression to retrieve the
# expected curve and associated covariance matrices for each feature
gr_points, gr_sigma = g.GetExpected(gravity,K_gravity,numGMRPoints)
b_points, b_sigma = g.GetExpected(body,K_body,numGMRPoints)
savetxt(name+"MuGravity.txt", gr_points,fmt='%.12f')
savetxt(name+"SigmaGravity.txt", gr_sigma,fmt='%.12f')
savetxt(name+"MuBody.txt", b_points,fmt='%.12f')
savetxt(name+"SigmaBody.txt", b_sigma,fmt='%.12f')
def loadModel(file_name, th=1,plot=True):
#Load files
gr_points = loadtxt(file_name+"MuGravity.txt")
gr_sigma = loadtxt(file_name+"SigmaGravity.txt")
b_points = loadtxt(file_name+"MuBody.txt")
b_sigma = loadtxt(file_name+"SigmaBody.txt")
#Add model
gm = GestureModel()
gm.addModel("gravity",gr_points, gr_sigma,th)
gm.addModel("body",b_points, b_sigma,th)
if plot == True:
plotResults(gr_points,gr_sigma, b_points,b_sigma,file_name)
return gm
#For ROS we must stay in the src folder
files1 = ["A/mod(1).txt","A/mod(2).txt","A/mod(3).txt","A/mod(4).txt","A/mod(5).txt","A/mod(6).txt" ]
#newModel("A",files1,th=3)
gm1 = loadModel("A",25)
files2 = ["P/mod(1).txt","P/mod(2).txt","P/mod(3).txt","P/mod(4).txt","P/mod(5).txt", "P/mod(6).txt"]
#newModel("P",files2,th=3)
gm2 = loadModel("P",22)
print "1"
v1 = Classifier()
v2 = Classifier()
w_g, w_b = v1.calculateW(files1,gm1)
print w_g, w_b
gm1.addWeight("gravity",w_g)
gm1.addWeight("body",w_b)
w_g, w_b = v1.calculateW(files2,gm2)
print w_g, w_b
gm2.addWeight("gravity",w_g)
gm2.addWeight("body",w_b)
v1.classify(gm1)
v2.classify(gm2)
sfile = "AP/acc(2).txt"
##poss1 = v1.validate_from_file(sfile, ',')
##poss2 = v2.validate_from_file(sfile, ',')
##
##fig = plt.figure()
##
##m,n = poss1.shape
##print m,n
##
##x = arange(0,m,1)
##import matplotlib.pyplot as plt
##plt.plot(x, poss1)
##
##m,n = poss2.shape
##print m,n
##
##x = arange(0,m,1)
##import matplotlib.pyplot as plt
##plt.plot(x, poss2)
#plt.savefig("result.png")
##print "\n\n 2"
##
##sfile = "D/mod(1).txt"
##v1.validate_from_file(sfile)
data = genfromtxt(sfile, delimiter=',')
numSamples,m = data.shape
p1 = zeros((numSamples,1))
p2 = zeros((numSamples,1))
import time
cont1 = 0
cont2 = 0
th1 = .5
th2 = .5
for i in range (numSamples):
start = time.time()
p1[i] = v1.online_validation(data[i,0],data[i,1],data[i,2])
p2[i] = v2.online_validation(data[i,0],data[i,1],data[i,2])
p1[i] = v1.online_validation(data[i,0],data[i,1],data[i,2])
p2[i] = v2.online_validation(data[i,0],data[i,1],data[i,2])
if(p1[i]>th1):
cont1 = cont1 + 1
if(p2[i]>th2):
cont2 = cont2 + 1
if(cont1 > 5):
print "avanti"
cont1 = 0
if(cont2 > 5):
print "stop"
cont2 = 0
done = time.time()
elapsed = done - start
#print(elapsed)
fig = plt.figure()
m,n = p1.shape
print m,n
x = arange(0,m,1)
import matplotlib.pyplot as plt
plt.plot(x, p1)
m,n = p2.shape
print m,n
x = arange(0,m,1)
import matplotlib.pyplot as plt
plt.plot(x, p2)
plt.savefig("result.png")
| gpl-3.0 |
stefanbuenten/nanodegree | p5/feature_selection/find_signature.py | 1 | 2111 | #!/usr/bin/python
import pickle
import numpy
numpy.random.seed(42)
### The words (features) and authors (labels), already largely processed.
### These files should have been created from the previous (Lesson 10)
### mini-project.
words_file = "../text_learning/your_word_data.pkl"
authors_file = "../text_learning/your_email_authors.pkl"
word_data = pickle.load( open(words_file, "r"))
authors = pickle.load( open(authors_file, "r") )
### test_size is the percentage of events assigned to the test set (the
### remainder go into training)
### feature matrices changed to dense representations for compatibility with
### classifier functions in versions 0.15.2 and earlier
from sklearn import cross_validation
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train = vectorizer.fit_transform(features_train)
features_test = vectorizer.transform(features_test).toarray()
### a classic way to overfit is to use a small number
### of data points and a large number of features;
### train on only 150 events to put ourselves in this regime
features_train = features_train[:150].toarray()
labels_train = labels_train[:150]
### your code goes here
# setup decision tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
clf = DecisionTreeClassifier()
# fit tree
clf.fit(features_train, labels_train)
# predict
pred = clf.predict(features_test)
# print accuracy
print "accuracy: %f" % accuracy_score(labels_test, pred)
# get sorted feature importance
feature_importance = clf.feature_importances_
# get most important features and their position
most_important = [[ix, i] for ix, i in enumerate(feature_importance) if i > 0.2]
print most_important
# get words behind most important features
feature_names = vectorizer.get_feature_names()
print feature_names[most_important[0][0]]
| mit |
maheshakya/scikit-learn | sklearn/tests/test_pipeline.py | 17 | 12512 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(BaseEstimator):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
""" Test the various init parameters of the pipeline.
"""
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params()
params2 = pipe2.get_params()
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
""" Test the various methods of the pipeline (anova).
"""
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
"""Test that the pipeline can take fit parameters
"""
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
"""Test the various methods of the pipeline (pca + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
"""Test the various methods of the pipeline (preprocessing + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
rs2/pandas | pandas/tests/series/methods/test_at_time.py | 2 | 2629 | from datetime import time
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestAtTime:
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_at_time(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = Series(np.random.randn(len(rng)), index=rng)
ts_local = ts.tz_localize(tzstr)
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
tm.assert_series_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_at_time(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.loc[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.loc["1/4/2000":]
result = chunk.loc[time(9, 30)]
expected = result_df[-1:]
# Without resetting the freqs, these are 5 min and 1440 min, respectively
result.index = result.index._with_freq(None)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
tm.assert_series_equal(result, ts)
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time("16:00")
assert len(rs) == 0
def test_at_time_raises(self):
# GH20725
ser = Series("a b c".split())
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg):
ser.at_time("00:00")
| bsd-3-clause |
kristoforcarlson/nest-simulator-fork | doc/nest_by_example/scripts/one_neuron_with_sine_wave.py | 13 | 1515 | # -*- coding: utf-8 -*-
#
# one_neuron_with_sine_wave.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import nest.voltage_trace
nest.ResetKernel()
neuron = nest.Create('iaf_neuron')
sine = nest.Create('ac_generator', 1,
{'amplitude': 100.0,
'frequency': 2.0})
noise = nest.Create('poisson_generator', 2,
[{'rate': 70000.0},
{'rate': 20000.0}])
voltmeter = nest.Create('voltmeter',1,
{'withgid': True})
nest.Connect(sine, neuron)
nest.Connect(voltmeter, neuron)
nest.Connect(noise[:1], neuron, syn_spec={'weight': 1.0, 'delay': 1.0})
nest.Connect(noise[1:], neuron, syn_spec={'weight': -1.0, 'delay': 1.0})
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
import matplotlib.pyplot as plt
plt.savefig('../figures/voltage_trace.eps')
| gpl-2.0 |
mlyundin/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow behaves works well on float data (need to
# be in the range [0-1]
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
petosegan/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0 # only the top 10 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
Athemis/PyDSF | ui/mplwidget.py | 1 | 2733 | from PyQt5 import QtWidgets
from PyQt5.QtCore import QCoreApplication
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg as
FigureCanvas)
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as
NavigationToolbar)
from matplotlib.figure import Figure
_translate = QCoreApplication.translate
class MplCanvas(FigureCanvas):
def __init__(self, parent=None, width=4, height=5, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.ax = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
# override mouseMoveEvent with non-functional dummy
# this will prevent the gui thread to hang while moving the mouse
# while a large number of plots is shown simultaniously
def mouseMoveEvent(self, event):
pass
def clear(self):
self.ax.clear()
self.fig.clear()
def save(self, filename):
try:
self.fig.savefig(filename, dpi=300)
except IOError:
QtWidgets.QMessageBox.critical(
self, _translate("MainWindow", "Error"),
_translate("MainWindow", "Error saving figure! Please check "
"permissions/free space of target path!"),
QtWidgets.QMessageBox.Close, QtWidgets.QMessageBox.Close)
class CustomNavigationToolbar(NavigationToolbar):
toolitems = (
(_translate("CustomNavigationToolbar", "Save"),
_translate("CustomNavigationToolbar",
"Save the figure"), "filesave",
"save_figure"),
(_translate("CustomNavigationToolbar", "Subplots"),
_translate("CustomNavigationToolbar",
"Configure subplots"), "subplots",
"configure_subplots"),
(None, None, None, None), )
def __init__(self, canvas, parent, coordinates=True):
NavigationToolbar.__init__(self, canvas, parent,
coordinates=coordinates)
class MplWidget(QtWidgets.QGraphicsView):
def __init__(self, parent=None):
QtWidgets.QGraphicsView.__init__(self, parent)
self.canvas = MplCanvas()
self.ntb = CustomNavigationToolbar(self.canvas, self,
coordinates=False)
self.vbl = QtWidgets.QVBoxLayout()
self.vbl.addWidget(self.ntb)
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
| mit |
StanczakDominik/PythonBiotSavart | biot.py | 1 | 30332 | from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from numpy import pi, sin, cos, mgrid
from mayavi import mlab
import os.path
import scipy.spatial
import sys
import shutil
import h5py
import pdb
import sys
#Grid parameters
NGRID=50
NZGRID=NGRID
#Region parameters
xmax=0.016
xmin=-xmax
ymax=xmax
ymin=-ymax
zmax=0.1/2.
zmin=-zmax
#Wire parameters for Biot-Savart
N_wires=1
r_wires = 0.008
wire_current=1.e6/N_wires
N=int(NGRID*12.5) #wire segments
low_cutoff_distance=0.0000000000001
display_every_n_point=1 #display every n point vectors of magnetic field
#Simulation parameters
N_particles=10
N_interpolation=8
velocity_scaling=1e6 #for random selection of initial velocity
dt=0.01/velocity_scaling
#Physical constants. All units in SI
electron_charge = -1.60217657e-19
electron_mass = 9.10938291e-31
deuteron_mass = 3.343583719e-27
qmratio=-electron_charge/deuteron_mass
MU=4e-7*np.pi
######Folder name management#################
number_of_arguments = len(sys.argv)
if number_of_arguments==1:
folder_name=""
else:
folder_name=str(sys.argv[1]) +"/"
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
shutil.copy2('biot.py',folder_name)
shutil.copy2('plot.py',folder_name)
shutil.copy2('plot.py',folder_name)
def append_to_file(file, array):
"""TO BE WRITTEN"""
# print("Array begins with")
# print(array[:3,:])
# print("Array ends with")
# print(array[-3:,:])
# length = len(array)
with open(file, 'ab') as file:
np.savetxt(file, array)
# print("Successfully appended an array of length %d" % length)
#########Grid functions####################
def nonuniform_grid():
"""TO BE WRITTEN"""
x,dx=np.linspace(xmin,xmax,NGRID,retstep=True)
y,dy=np.linspace(ymin,ymax,NGRID,retstep=True)
z,dz=np.linspace(zmin,zmax,NZGRID,retstep=True)
grid_positions=np.zeros((NGRID**2*NZGRID,3))
for ix, vx in enumerate(x):
for iy, vy in enumerate(y):
for iz, vz in enumerate(z):
row = NZGRID*NGRID*ix+NZGRID*iy+iz
grid_positions[row, 0] = vx
grid_positions[row, 1] = vy
grid_positions[row, 2] = vz
return grid_positions, dx, dy, dz
def uniform_grid():
"""TO BE WRITTEN"""
dx = dy = (ymax - ymin)/NGRID
dz = (zmax-zmin)/NZGRID
step_size=min([dx, dz])
x = y = np.arange(xmin, xmax, step_size)
z = np.arange(zmin, zmax, step_size)
NGRID_local = len(x)
NZGRID_local = len(z)
grid_positions=np.zeros((NGRID_local**2*NZGRID_local,3))
for ix, vx in enumerate(x):
for iy, vy in enumerate(y):
for iz, vz in enumerate(z):
row = NZGRID_local*NGRID_local*ix+NZGRID_local*iy+iz
grid_positions[row, 0] = vx
grid_positions[row, 1] = vy
grid_positions[row, 2] = vz
return grid_positions, dx, dy, dz
def load_grid(grid_calculation_function, mode_name=""):
"""TO BE WRITTEN"""
if(os.path.isfile(folder_name+mode_name+"grid_positions.dat")):
grid_positions=np.loadtxt(folder_name+mode_name+"grid_positions.dat")
dx, dy, dz = np.loadtxt(folder_name + mode_name + "step_sizes.dat")
print("Loaded grid positions")
else:
grid_positions, dx, dy, dz = grid_calculation_function()
np.savetxt(folder_name + mode_name + "grid_positions.dat", grid_positions)
step_sizes = np.array((dx, dy, dz))
np.savetxt(folder_name + mode_name + "step_sizes.dat", step_sizes)
print("Saved grid positions")
return grid_positions, dx, dy, dz
#########Magnetic field functions#########
def exact_ramp_field_grid(grid_positions, N_wires = 1, r_wires=r_wires,mode_name="", N=N):
"""TO BE WRITTEN"""
print("Calculating field via exact linear ramp formula")
B_constant_outside = MU*wire_current/2./np.pi
B_constant_inside = B_constant_outside/r_wires**2
grid_B = np.empty_like(grid_positions)
distances = np.sqrt(np.sum(grid_positions[:,:2]**2, axis=1))
distances_squared=distances**2
indices_inside = distances < r_wires
indices_outside= ~indices_inside
orientation=grid_positions/distances[:,np.newaxis]**2
grid_B[indices_inside,0] = B_constant_inside * grid_positions[indices_inside,1]
grid_B[indices_inside,1] = -B_constant_inside * grid_positions[indices_inside,0]
grid_B[indices_outside,0] = B_constant_outside * orientation[indices_outside,1]
grid_B[indices_outside,1] = -B_constant_outside * orientation[indices_outside,0]
grid_B[:,2] = 0.
# low_cutoff_indices=distances<low_cutoff_distance
# indices_cut_off=np.sum(low_cutoff_indices)
# if(indices_cut_off>0):
# grid_B[low_cutoff_indices, :] = 0
grid_B[np.isinf(grid_B)] = 0
grid_B[np.isnan(grid_B)] = 0
return grid_B
def exact_single_wire_field_grid(grid_positions, N_wires = 1, r_wires=r_wires,mode_name="", N=N):
"""TO BE WRITTEN"""
print("Calculating field via exact single wire ramp formula")
B0 = MU*wire_current/2./np.pi
grid_B = np.empty_like(grid_positions)
distances = np.sqrt(np.sum(grid_positions[:,:2]**2, axis=1))
orientation=(grid_positions/np.dstack((distances, distances, distances)))[0]
grid_B[:,0] = -B0 / distances*orientation[:,1]
grid_B[:,1] = B0 / distances*orientation[:,0]
grid_B[:,2] = 0.
low_cutoff_indices=distances<low_cutoff_distance
indices_cut_off=np.sum(low_cutoff_indices)
if(indices_cut_off>0):
grid_B[low_cutoff_indices, :] = 0
grid_B[np.isinf(grid_B)] = 0
grid_B[np.isnan(grid_B)] = 0
return grid_B
def biot_savart_field_grid(grid_positions, N_wires=6, r_wires=0.08, wire_current=1e6, mode_name="", N=N):
"""TO BE WRITTEN"""
print("Calculating field via Biot Savart")
grid_B=np.empty_like(grid_positions)
for i in range(N_wires):
angle = 2*i*np.pi/N_wires
x_wire_pos=r_wires*np.cos(angle)
y_wire_pos=r_wires*np.sin(angle)
z_wire=np.linspace(zmin,zmax,N)
x_wire=np.ones_like(z_wire)*x_wire_pos
y_wire=np.ones_like(z_wire)*y_wire_pos
wire = np.vstack((x_wire, y_wire, z_wire)).T
wire_gradient = np.gradient(wire)[0]
wire_length = np.sqrt(np.sum(wire_gradient**2, axis=1))
wire_gradient *= np.vstack((wire_length, wire_length, wire_length)).T
for index, wire_segment in enumerate(wire):
wire_segment_length = wire_gradient[index,:]
rprime=(grid_positions-wire_segment)
distances = np.sum(rprime**2, axis=1)**(3./2.)
denominator = np.vstack((distances, distances, distances)).T
differential=np.cross(wire_segment_length, rprime)/denominator*wire_current
low_cutoff_indices=distances<low_cutoff_distance
indices_cut_off=np.sum(low_cutoff_indices)
if(indices_cut_off>0):
differential[low_cutoff_indices, :] = 0
grid_B += differential*MU/np.pi/(4)
grid_B[np.isinf(grid_B)] = np.nan
grid_B*=N*10 # a correction factor to get the proper result - no idea why!
return grid_B
def load_field(field_generation_function, grid_positions, field_mode_name="", grid_mode_name="", N_wires=N_wires, r_wires=r_wires, N=N):
"""TO BE WRITTEN"""
if(os.path.isfile(folder_name+grid_mode_name+field_mode_name+"grid_B.dat")):
grid_B=np.loadtxt(folder_name+grid_mode_name+field_mode_name+"grid_B.dat")
print("Loaded grid fields")
else:
grid_B=field_generation_function(N_wires=N_wires, r_wires=r_wires, mode_name=field_mode_name, grid_positions=grid_positions)
np.savetxt(folder_name+grid_mode_name+field_mode_name+"grid_B.dat", grid_B)
print("Saved grid fields")
return grid_B
###########Solving fields at particle positions
def field_interpolation(r, N_interpolation=N_interpolation):
"""TO BE WRITTEN"""
distances, indices = mytree.query(r, k=N_interpolation)
weights =1./(distances)**8
sum_weights=np.sum(weights)
local_B=grid_B[indices]
interpolated_BX = np.sum(local_B[:,0]*weights)/sum_weights
interpolated_BY = np.sum(local_B[:,1]*weights)/sum_weights
interpolated_BZ = np.sum(local_B[:,2]*weights)/sum_weights
array = np.array([interpolated_BX,interpolated_BY,interpolated_BZ])
return array
def exact_ramp_field(r, N_interpolation = N_interpolation):
"""TO BE WRITTEN"""
B=np.empty_like(r)
B_constant_outside = MU*wire_current/2./np.pi
B_constant_inside = B_constant_outside/r_wires**2
distances = np.sqrt(np.sum(r[:,:2]**2,axis=1))
orientation=r/distances[:, np.newaxis]**2
index = distances<r_wires
not_index=np.logical_not(index)
B[index,0]=B_constant_inside*r[index,1]
B[index,1]=-B_constant_inside*r[index,0]
B[not_index,0]=B_constant_outside*orientation[not_index,1]
B[not_index,1]=-B_constant_outside*orientation[not_index,0]
B[np.isinf(B)] = 0
B[np.isnan(B)] = 0
return B
def exact_single_wire_field(r, N_interpolation = N_interpolation):
"""TO BE WRITTEN"""
B=np.empty_like(r)
B0 = MU*wire_current/2./np.pi
distances = np.sqrt(np.sum(r[:,:2]**2, axis=1))
orientation=r/distances
B[:,0] = B0 / distances*orientation[:,1]
B[:,1] = -B0 / distances*orientation[:,0]
B[np.isinf(B)] = 0
B[np.isnan(B)] = 0
return B
def test_exact_ramp_field():
N=100000
radial_distances = np.linspace(xmax/100000., xmax, N)
r=np.empty((N, 3))
r[:,0] = radial_distances
r[:,1:]=0.
B = exact_ramp_field(r)
B_magnitude=np.sqrt(np.sum(B**2, axis=1))
plt.plot(radial_distances, B_magnitude, "ko-.")
plt.show()
def test_z_field(r, N_interpolation = N_interpolation):
"""TO BE WRITTEN"""
B = np.zeros_like(r)
B[:,2]=1.
return B
############Random sampling
def old_position_sampler(N_particles):
"""Samples a uniform distribution of positions, scaled in the XY plane
and in the Z direction"""
r=np.random.random((N_particles,3))
r[:,:2]=r[:,:2]*(xmax-xmin)+xmin
r[:,2] = r[:,2]*(zmax-zmin)+zmin
r/=2.
return r
def old_velocity_sampler(N_particles, velocity_scaling, z_velocity=None, v_thermal=None):
"""Samples a uniform distribution of velocities with different
ranges in the XY plane and in the Z direction."""
v=np.random.random((N_particles,3))
v[:,:2]=(v[:,:2]*(xmax-xmin)+xmin)*velocity_scaling
v[:,2]=(v[:,2]*(zmax-zmin)+zmin)*velocity_scaling
return v
def bottom_position_sampler(N_particles):
"""Samples a uniform distribution of particles in the XY plane and a preset
positions in the Z direction, near the bottom of the simulation area"""
r=np.random.random((N_particles,3))
r[:,:2]=r[:,:2]*(xmax-xmin)+xmin
r/=2.
r[:,2] = 0.1*(zmax-zmin)+zmin
return r
def directional_velocity_sampler(N_particles, velocity_scaling, z_velocity=1e4, v_thermal=None):
"""Samples a random velocity in XY plane and a preset, upwards velocity in
the Z direction. Works best with particles spawning close to zmin"""
v = np.random.random((N_particles,3))
v[:,:2]=(v[:,:2]*(xmax-xmin)+xmin)*velocity_scaling/(xmax-xmin)
v[:,2] = z_velocity
return v
def directional_preset_magnitude_velocity_sampler(**kwargs):
"""Takes N_particles, v_thermal, z_velocity.
Samples only a random direction"""
v = np.empty((N_particles, 3))
v[:,2] = z_velocity
xy_velocity_magnitude=np.sqrt(v_thermal**2-z_velocity**2)
random_angles=2*np.pi()*np.random.random(N_particles)
v[:,0]=np.cos(random_angles)
v[:,1]=np.sin(random_angles)
v[:,:2]*=xy_velocity_magnitude
return v
def maxwellian_velocity_sampler():
pass
############Particle pushing algorithms
def boris_step(r, v, dt, calculate_field, N_interpolation=N_interpolation):
"""TO BE WRITTEN"""
field = calculate_field(r, N_interpolation = N_interpolation)
t = qmratio*field*dt/2.
vprime = v + np.cross(v,t, axis=1)
s = 2*t/(1.+np.sum(t*t))
dv = np.cross(vprime,s, axis=1)
v += dv
dr=v*dt
r+=dr
return r,v
def RK4_step(r,v,dt, calculate_field, N_interpolation=N_interpolation):
"""TO BE WRITTEN"""
field1 = calculate_field(r, N_interpolation = N_interpolation)
k1v = qmratio*np.cross(v,field1, axis=1)
k1r = v
r2 = r + k1r*dt/2.
v2 = v + k1v*dt/2.
field2 = calculate_field(r2)
k2v = qmratio*np.cross(v2,field2, axis=1)
k2r = v2
r3 = r + k2r*dt/2.
v3 = v + k2v*dt/2.
field3 = calculate_field(r3)
k3v = qmratio*np.cross(v3, field3, axis=1)
k3r = v3
r4 = r + k3r*dt
v4 = v + k3v*dt
field4 = calculate_field(r4)
k4v = qmratio*np.cross(v4, field4, axis=1)
k4r = v4
r += dt/6.*(k1r+2*(k2r+k3r)+k4r)
v += dt/6.*(k1v+2*(k2v+k3v)+k4v)
return r,v
def particle_loop(pusher_function, field_calculation_function, mode_name, N_particles,
N_iterations, save_every_n_iterations=10, save_velocities=False, seed=1,
N_interpolation=N_interpolation, continue_run=False, dt=dt, preset_r=None, preset_v=None,
velocity_sampler=old_velocity_sampler, position_sampler=old_position_sampler, preset_z_velocity=1e3, v_thermal=1.5e3,
velocity_scaling=1e3):
"""TO BE WRITTEN"""
print("""
Running simulation of mode %s with %d particles.
Pusher algorithm is %s.
Field is calculated using %s.
%d iterations with timestep %e, %d particles.
Saves data every %d iterations. Random seed is %d.
preset_r is %s, preset_v is %s
velocity sampler is %s, position sampler is %s
preset_z_velocity is %s""" %(mode_name, N_particles,
pusher_function.__name__,
field_calculation_function.__name__,
N_iterations, dt, N_particles,
save_every_n_iterations, seed,
preset_r, preset_v,
velocity_sampler.__name__, position_sampler.__name__,
preset_z_velocity))
if continue_run: print(" This is a continued run.")
if save_velocities: print(" Velocities are saved.")
print("\n")
np.random.seed(seed)
N_iterations=int(N_iterations)
N_particles=int(N_particles)
Dump_every_N_iterations=N_iterations/100
total_data_length = int(N_iterations/save_every_n_iterations)
with h5py.File(folder_name+mode_name+".hdf5", 'w') as loop_file:
loop_file.attrs['pusher_function'] = str(pusher_function)
loop_file.attrs['field_calculation_function'] = str(field_calculation_function)
loop_file.attrs['N_particles'] = N_particles
loop_file.attrs['N_iterations'] = N_iterations
loop_file.attrs['save_every_n_iterations'] = save_every_n_iterations
loop_file.attrs['seed'] = seed
loop_file.attrs['N_interpolation'] = N_interpolation
loop_file.attrs['continue_run'] = continue_run
loop_file.attrs['dt'] = dt
loop_file.attrs['position_sampler'] = str(position_sampler)
loop_file.attrs['velocity_sampler'] = str(velocity_sampler)
#TODO LEFT WORK HERE
if preset_r is not None:
loop_file.attrs['preset_r'] = preset_r
else:
loop_file.attrs['preset_r'] = "No preset r"
if preset_v is not None:
loop_file.attrs['preset_v'] = preset_v
else:
loop_file.attrs['preset_v'] = "No preset v"
positions_dataset = loop_file.create_dataset("positions", (N_particles,3,total_data_length), dtype='float')
velocities_dataset = loop_file.create_dataset("velocities", (N_particles,3,total_data_length), dtype='float')
if preset_r is not None:
#if there are preset initial conditions (N_particles should be 1 for this case)
r=preset_r
else:
#generate initial conditions at random
r=position_sampler(N_particles)
if preset_v is not None:
v=preset_r
else:
v=velocity_sampler(N_particles, velocity_scaling, preset_z_velocity, v_thermal)
positions_dataset.attrs['starting_position']=r
velocities_dataset.attrs['starting_velocity']=v
print(r)
print(v)
if (pusher_function==boris_step):
dummy, v = pusher_function(r,v,-dt/2., field_calculation_function)
ended_on_region_exit=False
for i in xrange(N_iterations):
#Enter loop
if not i%Dump_every_N_iterations:
sys.stdout.write('\r')
sys.stdout.write("%.1f%% done, iteration %d out of %d"%(i/N_iterations*100., i,N_iterations))
sys.stdout.flush()
counter_to_save_data=i%save_every_n_iterations
#Push position and velocity
running_particles=np.isfinite(r[:,0])*np.isfinite(r[:,1])*np.isfinite(r[:,2])
if np.sum(running_particles)<1:
# if not np.any(running_particles):
print("All of the particles have run out!")
positions_dataset[:,:,data_save_index+1:]=np.nan
velocities_dataset[:,:,data_save_index+1:]=np.nan
break
try:
r[running_particles],v[running_particles] = pusher_function(r[running_particles],v[running_particles],dt, field_calculation_function, N_interpolation=N_interpolation)
except IndexError:
pdb.set_trace()
# r[running_particles],v[running_particles] = pusher_function(r[running_particles],v[running_particles],dt, field_calculation_function, N_interpolation=N_interpolation)
#Check for particle leaving region
x_iter, y_iter, z_iter = r[:,0], r[:,1], r[:,2]
if not counter_to_save_data:
data_save_index=i//save_every_n_iterations
positions_dataset[:,:,data_save_index]=r
velocities_dataset[:,:,data_save_index]=v
# use isnan for removed particles
ran_out_x=np.logical_or(x_iter<xmin,x_iter>xmax)
ran_out_y=np.logical_or(y_iter<ymin,y_iter>ymax)
ran_out_z=np.logical_or(z_iter<zmin,z_iter>zmax)
ran_out=np.logical_or(ran_out_x, np.logical_or(ran_out_y,ran_out_z))
# pdb.set_trace()
r[ran_out,:]=np.nan
v[ran_out,:]=np.nan
# if x_iter > xmax or x_iter < xmin or y_iter > ymax or y_iter < ymin or z_iter > zmax or z_iter < zmin:
# print("Ran out of the area at i=" + str(i))
# ended_on_region_exit = True #prevent program from saving position after leaving the loop
# break #quit the for loop
print("Push finished.")
##########################Diagnostics
def calculate_variances(exact_trajectory, trial_trajectory):
"""TO BE REDONE"""
lengths = (len(exact_trajectory), len(trial_trajectory))
min_len=min(lengths)
return np.sum((exact_trajectory[:min_len]-trial_trajectory[:min_len])**2, axis=1)
compared_trajectories_number=0
def compare_trajectories(exact_trajectory, trial_trajectory):
"""TO BE REDONE"""
global compared_trajectories_number
variances = calculate_variances(exact_trajectory, trial_trajectory)
sum_of_variances = np.sum(variances)
plt.plot(variances)
plt.title("Total variance = " + str(sum_of_variances))
plt.ylabel("square difference")
plt.xlabel("iterations")
plt.savefig(folder_name + "Trajectory_comparison" + str(compared_trajectories_number)+".png")
compared_trajectories_number+=1
plt.clf()
return sum_of_variances
def test_pusher_algorithms():
"""TO BE REDONE"""
#Simulation parameters
N_iterations=int(1e8)
Dump_every_N_iterations=int(1e6)
N_particles=1
N_interpolation=8
velocity_scaling=1e6 #for random selection of initial velocity
dt=0.01/velocity_scaling
seed=1
dt=1e-10
RK4_path = particle_loop(pusher_function=RK4_step, field_calculation_function = test_z_field,
mode_name = "RK4", N_particles = N_particles, N_iterations=int(N_iterations),seed=seed,
save_velocities=True, continue_run=False, dt=dt, preset_r=np.array([0.008, 0., 0.]), preset_v=np.array([0,1000.,0]))
boris_path = particle_loop(pusher_function=boris_step, field_calculation_function = test_z_field,
mode_name = "boris", N_particles = N_particles, N_iterations=int(N_iterations),seed=seed,
save_velocities=True, continue_run=False, dt=dt, preset_r=np.array([0.008, 0., 0.]), preset_v=np.array([0,1000.,0]))
print("Finished calculation.")
RK4_x = RK4_path[:,0]
RK4_y = RK4_path[:,1]
boris_x = boris_path[:,0]
boris_y = boris_path[:,1]
plt.plot(RK4_x, RK4_y, "ro-", label="RK4")
plt.plot(boris_x, boris_y, "bo-", label="Boris")
plt.grid()
plt.legend()
plt.show()
print("Finished display")
plot_energies("boris", "RK4")
#####################Visualization
def display_wires(N_wires=1, r_wires=0):
"""TO BE REDONE"""
print("Loading wires")
for i in range(N_wires):
angle = 2*i*np.pi/N_wires
x_wire_pos=r_wires*np.cos(angle)
y_wire_pos=r_wires*np.sin(angle)
z_wire=np.linspace(zmin,zmax,N)
x_wire=np.ones_like(z_wire)*x_wire_pos
y_wire=np.ones_like(z_wire)*y_wire_pos
mlab.plot3d(x_wire,y_wire,z_wire, np.zeros_like(z_wire), tube_radius=None)
def display_quiver(grid_mode_name="", field_mode_name="", display_every_n_point=1):
"""TO BE REDONE"""
print("Loading quiver")
grid_positions=np.loadtxt(folder_name+grid_mode_name+"grid_positions.dat")
grid_B=np.loadtxt(folder_name+grid_mode_name+field_mode_name+"grid_B.dat")
x_display=grid_positions[::display_every_n_point,0]
y_display=grid_positions[::display_every_n_point,1]
z_display=grid_positions[::display_every_n_point,2]
bx_display=grid_B[::display_every_n_point,0]
by_display=grid_B[::display_every_n_point,1]
bz_display=grid_B[::display_every_n_point,2]
quiver=mlab.quiver3d(x_display, y_display, z_display, bx_display, by_display, bz_display, opacity=0.01)
# mlab.vectorbar(quiver, orientation='vertical')
def display_difference_quiver(grid1, grid2, display_every_n_point=1):
"""TO BE REDONE"""
x_display=grid_positions[::display_every_n_point,0]
y_display=grid_positions[::display_every_n_point,1]
z_display=grid_positions[::display_every_n_point,2]
grid1fig = mlab.figure()
grid1plot=mlab.quiver3d(x_display, y_display, z_display, grid1[:,0], grid1[:,1], grid1[:,2], opacity = 0.2, figure=grid1fig, colormap="Blues")
grid2fig = mlab.figure()
grid2plot=mlab.quiver3d(x_display, y_display, z_display, grid2[:,0], grid2[:,1], grid2[:,2], opacity = 0.2, figure=grid2fig, colormap="Reds")
scale=np.max(grid2)/np.max(grid1)
print("======SCALE " + str(scale) + "==========")
# grid1*=scale
difference = grid1-grid2
bx_display=difference[::display_every_n_point,0]
by_display=difference[::display_every_n_point,1]
bz_display=difference[::display_every_n_point,2]
difffig = mlab.figure()
diffplot=mlab.quiver3d(x_display, y_display, z_display, bx_display, by_display, bz_display, opacity = 0.2, figure=difffig)
mlab.quiver3d(x_display, y_display, z_display, grid2[:,0], grid2[:,1], grid2[:,2], opacity = 0.2, figure=grid2fig)
mlab.colorbar(diffplot)
mlab.colorbar(grid1plot)
mlab.colorbar(grid2plot)
return scale
def plot_xy_positions(*args):
"""Displays particle trajectories in the XY plane using Matplotlib.
Takes in (mode_name, style) pairs such as
("boris", "bo-"), ("rk4", "ro-")
and displays each mode using the given pyplot line style.
"""
for mode_name, style in args:
with h5py.File(folder_name+mode_name+".hdf5", "r") as f:
particle_positions=f['positions']
for particle in xrange(f.attrs['N_particles']):
x=particle_positions[particle,0,:]
y=particle_positions[particle,1,:]
plt.plot(x,y, style, label=mode_name)
plt.grid()
plt.legend()
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.xlabel("x [m]")
plt.ylabel("y [m]")
plt.show()
def plot_xz_positions(*args):
"""Displays particle trajectories in the XZ plane using Matplotlib.
Takes in (mode_name, style) pairs such as
("boris", "bo-"), ("rk4", "ro-")
and displays each mode using the given pyplot line style.
"""
for mode_name, style in args:
with h5py.File(folder_name+mode_name+".hdf5", "r") as f:
particle_positions=f['positions']
for particle in xrange(f.attrs['N_particles']):
x=particle_positions[particle,0,:]
z=particle_positions[particle,2,:]
plt.plot(x,z, style, label=mode_name)
plt.grid()
plt.legend()
plt.xlim(xmin, xmax)
plt.ylim(zmin, zmax)
plt.xlabel("x [m]")
plt.ylabel("z [m]")
plt.show()
def display_particles(modes, particles=None):
"""Displays particle trajectories from hdf5 files.
Takes in (mode_name, style) pairs such as
("boris", "Blues"), ("rk4", "Reds)
and displays each mode using the given mayavi colormap name."""
print(modes)
for mode_name, style in modes:
print("Displaying particles from mode " + mode_name)
with h5py.File(folder_name+mode_name+".hdf5", "r") as f:
particle_positions=f['positions']
if particles==None:
particle_set=xrange(f.attrs['N_particles'])
else:
particle_set=particles
for particle in particle_set:
x=particle_positions[particle,0,:]
y=particle_positions[particle,1,:]
z=particle_positions[particle,2,:]
finite_indices=np.isfinite(z)
x=x[finite_indices]
y=y[finite_indices]
z=z[finite_indices]
time = np.arange(len(z))
plot = mlab.plot3d(x, y, z, time, colormap=style, tube_radius=None)
mlab.colorbar(plot)
print("Finished displaying particles")
def display_particles_velocity_magnitude(*args):
"""Displays particle trajectories from hdf5 files.
Takes in (mode_name, style) pairs such as
("boris", "Blues"), ("rk4", "Reds)
and displays each mode using the given mayavi colormap name."""
for mode_name, style in args:
print("Displaying particles from mode " + mode_name + " as velocity quivers")
with h5py.File(folder_name+mode_name+".hdf5", "r") as f:
particle_positions=f['positions']
particle_velocities=f['velocities']
for particle in xrange(f.attrs['N_particles']):
x=particle_positions[particle,0,:]
y=particle_positions[particle,1,:]
z=particle_positions[particle,2,:]
finite_indices=np.isfinite(z)
x=x[finite_indices]
y=y[finite_indices]
z=z[finite_indices]
v_magnitude = np.sqrt(np.sum(particle_velocities[particle,:,:]**2, axis=0))
v_magnitude=v_magnitude[finite_indices,...]
plot = mlab.plot3d(x, y, z, v_magnitude, colormap=style, tube_radius=None)
mlab.colorbar(plot, label_fmt='%.6f')
print("Finished displaying particles")
def display_particles_radial_distance(*args):
"""Displays particle trajectories from hdf5 files.
Takes in (mode_name, style) pairs such as
("boris", "Blues"), ("rk4", "Reds)
and displays each mode using the given mayavi colormap name."""
for mode_name, style in args:
print("Displaying particles from mode " + mode_name + " as velocity quivers")
with h5py.File(folder_name+mode_name+".hdf5", "r") as f:
particle_positions=f['positions']
particle_velocities=f['velocities']
for particle in xrange(f.attrs['N_particles']):
x=particle_positions[particle,0,:]
y=particle_positions[particle,1,:]
z=particle_positions[particle,2,:]
finite_indices=np.isfinite(z)
x=x[finite_indices]
y=y[finite_indices]
z=z[finite_indices]
radial_distance = np.sqrt(x**2+y**2)
plot = mlab.plot3d(x, y, z, radial_distance, colormap=style, tube_radius=None)
mlab.colorbar(plot, label_fmt='%.6f')
print("Finished displaying particles")
def plot_energies(*args):
"""TO BE REDONE"""
for mode_name, style in args:
print(mode_name, style)
print("Displaying particle energies from mode " + mode_name)
with h5py.File(folder_name+mode_name+".hdf5","r") as f:
particle_velocities=f['velocities']
time=np.arange(f.attrs['N_iterations']/f.attrs['save_every_n_iterations'])*f.attrs['dt']
energies = np.sum(particle_velocities[...]**2, axis=1) * deuteron_mass/2.
for particle in xrange(f.attrs['N_particles']):
particle_energy=energies[particle,:]
plt.plot(time,particle_energy, style, label="%s p.%d"%(mode_name, particle))
change_in_particle_energy=particle_energy[-1]-particle_energy[0]
relative_change_in_particle_energy=change_in_particle_energy/particle_energy[0]
print("Particle %d"%particle)
print("Initial energy %e"%particle_energy[0])
print("Energy changed by %e"%change_in_particle_energy)
print("Relative change in energy: %e"%relative_change_in_particle_energy)
plt.grid()
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Energy [J]")
plt.show()
def read_parameters(*args):
"""TO BE REDONE"""
for mode_name in args:
print("MODE : %s"%mode_name)
with h5py.File(folder_name+mode_name+".hdf5","r") as f:
for key, value in f.attrs.items():
print("%s : %s" %(str(key), str(value)))
def load_particle_trajectory(mode_name=""):
"""TO BE REDONE"""
print("Loagin particle from mode " + mode_name)
particle_file_name=folder_name+mode_name+"0positions.dat"
if(os.path.isfile(particle_file_name)):
positions=np.loadtxt(particle_file_name)
return positions
# if __name__ =="__main__":
# pass
| gpl-3.0 |
hlin117/scikit-learn | sklearn/datasets/samples_generator.py | 9 | 56766 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for and remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms. Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profiles is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components : int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary : array of shape [n_features, n_components]
The dictionary with normalized components (D).
code : array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al [1]. as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim : integer, optional (default=1)
The size of the random matrix to generate.
alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
sergiopasra/numina | numina/array/display/pause_debugplot.py | 3 | 2450 | #
# Copyright 2015-2016 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
from __future__ import division
from __future__ import print_function
import sys
from numina.array.display.matplotlib_qt import plt
DEBUGPLOT_CODES = (0, -1, 1, -2, 2, -10, 10, -11, 11, -12, 12,
-21, 21, -22, 22)
def pause_debugplot(debugplot, optional_prompt=None, pltshow=False,
tight_layout=True):
"""Ask the user to press RETURN to continue after plotting.
Parameters
----------
debugplot : int
Determines whether intermediate computations and/or plots
are displayed:
00 : no debug, no plots
01 : no debug, plots without pauses
02 : no debug, plots with pauses
10 : debug, no plots
11 : debug, plots without pauses
12 : debug, plots with pauses
21 : debug, extra plots without pauses
22 : debug, extra plots with pause
NOTE: negative values are also valid and indicate that a call
to plt.close() is also performed
optional_prompt : string
Optional prompt.
pltshow : bool
If True, a call to plt.show() is also performed.
tight_layout : bool
If True, and pltshow=True, a call to plt.tight_layout() is
also performed.
"""
if debugplot not in DEBUGPLOT_CODES:
raise ValueError('Invalid debugplot value:', debugplot)
if debugplot < 0:
debugplot_ = -debugplot
pltclose = True
else:
debugplot_ = debugplot
pltclose = False
if pltshow:
if debugplot_ in [1, 2, 11, 12, 21, 22]:
if tight_layout:
plt.tight_layout()
if debugplot_ in [1, 11, 21]:
plt.show(block=False)
plt.pause(0.2)
elif debugplot_ in [2, 12, 22]:
print('Press "q" to continue...', end='')
sys.stdout.flush()
plt.show()
print('')
else:
if debugplot_ in [2, 12, 22]:
if optional_prompt is None:
print('Press <RETURN> to continue...', end='')
else:
print(optional_prompt, end='')
sys.stdout.flush()
cdummy = sys.stdin.readline().strip()
if debugplot_ in [1, 2, 11, 12, 21, 22] and pltclose:
plt.close()
| gpl-3.0 |
liberorbis/libernext | env/lib/python2.7/site-packages/IPython/kernel/inprocess/ipkernel.py | 9 | 6881 | """An in-process kernel"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from contextlib import contextmanager
import logging
import sys
# Local imports
from IPython.core.interactiveshell import InteractiveShellABC
from IPython.utils.jsonutil import json_clean
from IPython.utils.traitlets import Any, Enum, Instance, List, Type
from IPython.kernel.zmq.ipkernel import Kernel
from IPython.kernel.zmq.zmqshell import ZMQInteractiveShell
from .socket import DummySocket
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class InProcessKernel(Kernel):
#-------------------------------------------------------------------------
# InProcessKernel interface
#-------------------------------------------------------------------------
# The frontends connected to this kernel.
frontends = List(
Instance('IPython.kernel.inprocess.client.InProcessKernelClient')
)
# The GUI environment that the kernel is running under. This need not be
# specified for the normal operation for the kernel, but is required for
# IPython's GUI support (including pylab). The default is 'inline' because
# it is safe under all GUI toolkits.
gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'),
default_value='inline')
raw_input_str = Any()
stdout = Any()
stderr = Any()
#-------------------------------------------------------------------------
# Kernel interface
#-------------------------------------------------------------------------
shell_class = Type()
shell_streams = List()
control_stream = Any()
iopub_socket = Instance(DummySocket, ())
stdin_socket = Instance(DummySocket, ())
def __init__(self, **traits):
# When an InteractiveShell is instantiated by our base class, it binds
# the current values of sys.stdout and sys.stderr.
with self._redirected_io():
super(InProcessKernel, self).__init__(**traits)
self.iopub_socket.on_trait_change(self._io_dispatch, 'message_sent')
self.shell.kernel = self
def execute_request(self, stream, ident, parent):
""" Override for temporary IO redirection. """
with self._redirected_io():
super(InProcessKernel, self).execute_request(stream, ident, parent)
def start(self):
""" Override registration of dispatchers for streams. """
self.shell.exit_now = False
def _abort_queue(self, stream):
""" The in-process kernel doesn't abort requests. """
pass
def _raw_input(self, prompt, ident, parent):
# Flush output before making the request.
self.raw_input_str = None
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
content = json_clean(dict(prompt=prompt))
msg = self.session.msg(u'input_request', content, parent)
for frontend in self.frontends:
if frontend.session.session == parent['header']['session']:
frontend.stdin_channel.call_handlers(msg)
break
else:
logging.error('No frontend found for raw_input request')
return str()
# Await a response.
while self.raw_input_str is None:
frontend.stdin_channel.process_events()
return self.raw_input_str
#-------------------------------------------------------------------------
# Protected interface
#-------------------------------------------------------------------------
@contextmanager
def _redirected_io(self):
""" Temporarily redirect IO to the kernel.
"""
sys_stdout, sys_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self.stdout, self.stderr
yield
sys.stdout, sys.stderr = sys_stdout, sys_stderr
#------ Trait change handlers --------------------------------------------
def _io_dispatch(self):
""" Called when a message is sent to the IO socket.
"""
ident, msg = self.session.recv(self.iopub_socket, copy=False)
for frontend in self.frontends:
frontend.iopub_channel.call_handlers(msg)
#------ Trait initializers -----------------------------------------------
def _log_default(self):
return logging.getLogger(__name__)
def _session_default(self):
from IPython.kernel.zmq.session import Session
return Session(parent=self)
def _shell_class_default(self):
return InProcessInteractiveShell
def _stdout_default(self):
from IPython.kernel.zmq.iostream import OutStream
return OutStream(self.session, self.iopub_socket, u'stdout', pipe=False)
def _stderr_default(self):
from IPython.kernel.zmq.iostream import OutStream
return OutStream(self.session, self.iopub_socket, u'stderr', pipe=False)
#-----------------------------------------------------------------------------
# Interactive shell subclass
#-----------------------------------------------------------------------------
class InProcessInteractiveShell(ZMQInteractiveShell):
kernel = Instance('IPython.kernel.inprocess.ipkernel.InProcessKernel')
#-------------------------------------------------------------------------
# InteractiveShell interface
#-------------------------------------------------------------------------
def enable_gui(self, gui=None):
"""Enable GUI integration for the kernel."""
from IPython.kernel.zmq.eventloops import enable_gui
if not gui:
gui = self.kernel.gui
return enable_gui(gui, kernel=self.kernel)
def enable_matplotlib(self, gui=None):
"""Enable matplotlib integration for the kernel."""
if not gui:
gui = self.kernel.gui
return super(InProcessInteractiveShell, self).enable_matplotlib(gui)
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime."""
if not gui:
gui = self.kernel.gui
return super(InProcessInteractiveShell, self).enable_pylab(gui, import_all,
welcome_message)
InteractiveShellABC.register(InProcessInteractiveShell)
| gpl-2.0 |
victorbergelin/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
MartinDelzant/scikit-learn | benchmarks/bench_mnist.py | 76 | 6136 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rat
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
WendyLiuLab/elisascripts | setup.py | 2 | 1270 | from setuptools import setup
versionfile = 'elisa/version.py'
exec(compile(open(versionfile, 'rb').read(), versionfile, 'exec'))
setup(
name='liulab_elisa',
version=__version__,
url='https://github.com/WendyLiuLab/elisascripts',
license='MIT',
author='Tim D. Smith',
author_email='[email protected]',
description='Analyses single-cell ELISA image stacks.',
packages=['elisa', 'elisa.test'],
package_data={'elisa.test': ['*.tif']},
platforms='any',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
install_requires=[
# 'cairo', but it isn't on pypi
'ijroi',
'numpy',
'matplotlib',
'pandas',
'pillow',
'scipy',
'shapely',
'tifffile',
'typing',
],
entry_points={
'console_scripts': [
'elisa_id_singlets = elisa.id_singlets:main',
'elisa_normalize_bg = elisa.normalize_bg:main',
'elisa_register = elisa.register:main',
'elisa_stitch = elisa.stitch:main',
]
},
)
| bsd-3-clause |
tartopum/MPF | mpf/views/differencing.py | 2 | 1307 | """Contain the class to generate a view for differenced data."""
from os.path import join
import matplotlib.pyplot as plt
import pylatex
from mpf.views.abstracts import View
from mpf.models import mongo
__all__ = ('Differencing')
class Differencing(View):
"""Provide a view for differenced data."""
def __init__(self, path, title, _ids):
super().__init__(join(path, 'differenced'))
self.title = title
self._ids = _ids
self.cow = mongo.cow(_ids[0])
def generate(self):
"""Generate the view of differenced production."""
for _id in self._ids:
data = mongo.data(_id)
degree = mongo.settings(_id)['degree']
self.plot(data, degree)
plt.clf()
def plot(self, data, degree):
"""Add the plot of differenced data with the degree ``degree``.
:param data: TODO
:param degree: The degree with which the data have been differenced.
:type data: list
:type degree: int
"""
days = list(range(degree, degree + len(data)))
plt.plot(days, data)
plt.xlabel(self.DAY_LABEL)
plt.ylabel(self.PROD_LABEL)
with self.doc.create(pylatex.Section('Degree = {}'.format(degree))):
self.add_plot()
| mit |
btabibian/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 50 | 7817 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
h2oai/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_benignKmeans.py | 2 | 1289 | from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn.cluster import KMeans
from sklearn.impute import SimpleImputer
from h2o.estimators.kmeans import H2OKMeansEstimator
def benign_kmeans():
print("Importing benign.csv data...")
benign_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/benign.csv"))
benign_sci = np.genfromtxt(pyunit_utils.locate("smalldata/logreg/benign.csv"), delimiter=",")
# Impute missing values with column mean
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
benign_sci = imp.fit_transform(benign_sci)
for i in range(1,7):
print("H2O K-Means with " + str(i) + " clusters:")
benign_h2o_km = H2OKMeansEstimator(k=i)
benign_h2o_km.train(x=list(range(benign_h2o.ncol)), training_frame=benign_h2o)
print("H2O centers")
print(benign_h2o_km.centers())
benign_sci_km = KMeans(n_clusters=i, init='k-means++', n_init=1)
benign_sci_km.fit(benign_sci)
print("sckit centers")
print(benign_sci_km.cluster_centers_)
if __name__ == "__main__":
pyunit_utils.standalone_test(benign_kmeans)
else:
benign_kmeans()
| apache-2.0 |
aestrivex/mne-python | mne/viz/decoding.py | 13 | 8804 | """Functions to plot decoding results
"""
from __future__ import print_function
# Authors: Denis Engemann <[email protected]>
# Clement Moutard <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: Simplified BSD
import numpy as np
import warnings
def plot_gat_matrix(gat, title=None, vmin=None, vmax=None, tlim=None,
ax=None, cmap='RdBu_r', show=True, colorbar=True,
xlabel=True, ylabel=True):
"""Plotting function of GeneralizationAcrossTime object
Predict each classifier. If multiple classifiers are passed, average
prediction across all classifier to result in a single prediction per
classifier.
Parameters
----------
gat : instance of mne.decoding.GeneralizationAcrossTime
The gat object.
title : str | None
Figure title. Defaults to None.
vmin : float | None
Min color value for scores. If None, sets to min(gat.scores_).
Defaults to None.
vmax : float | None
Max color value for scores. If None, sets to max(gat.scores_).
Defaults to None.
tlim : array-like, (4,) | None
The temporal boundaries. If None, expands to
[tmin_train, tmax_train, tmin_test, tmax_test]. Defaults to None.
ax : object | None
Plot pointer. If None, generate new figure. Defaults to None.
cmap : str | cmap object
The color map to be used. Defaults to 'RdBu_r'.
show : bool
If True, the figure will be shown. Defaults to True.
colorbar : bool
If True, the colorbar of the figure is displayed. Defaults to True.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
if not hasattr(gat, 'scores_'):
raise RuntimeError('Please score your data before trying to plot '
'scores')
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots(1, 1)
# Define time limits
if tlim is None:
tt_times = gat.train_times_['times']
tn_times = gat.test_times_['times']
tlim = [tn_times[0][0], tn_times[-1][-1], tt_times[0], tt_times[-1]]
# Plot scores
im = ax.imshow(gat.scores_, interpolation='nearest', origin='lower',
extent=tlim, vmin=vmin, vmax=vmax, cmap=cmap)
if xlabel is True:
ax.set_xlabel('Testing Time (s)')
if ylabel is True:
ax.set_ylabel('Training Time (s)')
if title is not None:
ax.set_title(title)
ax.axvline(0, color='k')
ax.axhline(0, color='k')
ax.set_xlim(tlim[:2])
ax.set_ylim(tlim[2:])
if colorbar is True:
plt.colorbar(im, ax=ax)
if show is True:
plt.show()
return fig if ax is None else ax.get_figure()
def plot_gat_times(gat, train_time='diagonal', title=None, xmin=None,
xmax=None, ymin=None, ymax=None, ax=None, show=True,
color=None, xlabel=True, ylabel=True, legend=True,
chance=True, label='Classif. score'):
"""Plotting function of GeneralizationAcrossTime object
Plot the scores of the classifier trained at 'train_time'.
Parameters
----------
gat : instance of mne.decoding.GeneralizationAcrossTime
The gat object.
train_time : 'diagonal' | float | list or array of float
Plot a 1d array of a portion of gat.scores_.
If set to 'diagonal', plots the gat.scores_ of classifiers
trained and tested at identical times
if set to float | list or array of float, plots scores of the
classifier(s) trained at (a) specific training time(s).
Default to 'diagonal'.
title : str | None
Figure title. Defaults to None.
xmin : float | None, optional
Min time value. Defaults to None.
xmax : float | None, optional
Max time value. Defaults to None.
ymin : float | None, optional
Min score value. If None, sets to min(scores). Defaults to None.
ymax : float | None, optional
Max score value. If None, sets to max(scores). Defaults to None.
ax : object | None
Plot pointer. If None, generate new figure. Defaults to None.
show : bool, optional
If True, the figure will be shown. Defaults to True.
color : str
Score line color. Defaults to 'steelblue'.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
chance : bool | float.
Plot chance level. If True, chance level is estimated from the type
of scorer. Defaults to None.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
if not hasattr(gat, 'scores_'):
raise RuntimeError('Please score your data before trying to plot '
'scores')
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots(1, 1)
# Find and plot chance level
if chance is not False:
if chance is True:
chance = _get_chance_level(gat.scorer_, gat.y_train_)
ax.axhline(float(chance), color='k', linestyle='--',
label="Chance level")
ax.axvline(0, color='k', label='')
if isinstance(train_time, (str, float)):
train_time = [train_time]
label = [label]
elif isinstance(train_time, (list, np.ndarray)):
label = train_time
else:
raise ValueError("train_time must be 'diagonal' | float | list or "
"array of float.")
if color is None or isinstance(color, str):
color = np.tile(color, len(train_time))
for _train_time, _color, _label in zip(train_time, color, label):
_plot_gat_time(gat, _train_time, ax, _color, _label)
if title is not None:
ax.set_title(title)
if ymin is not None and ymax is not None:
ax.set_ylim(ymin, ymax)
if xmin is not None and xmax is not None:
ax.set_xlim(xmin, xmax)
if xlabel is True:
ax.set_xlabel('Time (s)')
if ylabel is True:
ax.set_ylabel('Classif. score ({0})'.format(
'AUC' if 'roc' in repr(gat.scorer_) else r'%'))
if legend is True:
ax.legend(loc='best')
if show is True:
plt.show()
return fig if ax is None else ax.get_figure()
def _plot_gat_time(gat, train_time, ax, color, label):
"""Aux function of plot_gat_time
Plots a unique score 1d array"""
# Detect whether gat is a full matrix or just its diagonal
if np.all(np.unique([len(t) for t in gat.test_times_['times']]) == 1):
scores = gat.scores_
elif train_time == 'diagonal':
# Get scores from identical training and testing times even if GAT
# is not square.
scores = np.zeros(len(gat.scores_))
for train_idx, train_time in enumerate(gat.train_times_['times']):
for test_times in gat.test_times_['times']:
# find closest testing time from train_time
lag = test_times - train_time
test_idx = np.abs(lag).argmin()
# check that not more than 1 classifier away
if np.abs(lag[test_idx]) > gat.train_times_['step']:
score = np.nan
else:
score = gat.scores_[train_idx][test_idx]
scores[train_idx] = score
elif isinstance(train_time, float):
train_times = gat.train_times_['times']
idx = np.abs(train_times - train_time).argmin()
if train_times[idx] - train_time > gat.train_times_['step']:
raise ValueError("No classifier trained at %s " % train_time)
scores = gat.scores_[idx]
else:
raise ValueError("train_time must be 'diagonal' or a float.")
kwargs = dict()
if color is not None:
kwargs['color'] = color
ax.plot(gat.train_times_['times'], scores, label=str(label), **kwargs)
def _get_chance_level(scorer, y_train):
# XXX JRK This should probably be solved within sklearn?
if scorer.__name__ == 'accuracy_score':
chance = np.max([np.mean(y_train == c) for c in np.unique(y_train)])
elif scorer.__name__ == 'roc_auc_score':
chance = 0.5
else:
chance = np.nan
warnings.warn('Cannot find chance level from %s, specify chance'
' level' % scorer.func_name)
return chance
| bsd-3-clause |
saltstar/spark | python/pyspark/worker.py | 4 | 10236 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
from __future__ import print_function
import os
import sys
import time
import socket
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.taskcontext import TaskContext
from pyspark.files import SparkFiles
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer, ArrowStreamPandasSerializer
from pyspark.sql.types import to_arrow_type
from pyspark import shuffle
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
# worker can be used, so donot add path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two functions together """
return lambda *a: g(f(*a))
def wrap_udf(f, return_type):
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def wrap_pandas_scalar_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_length(*a):
result = f(*a)
if not hasattr(result, "__len__"):
raise TypeError("Return type of the user-defined functon should be "
"Pandas.Series, but is {}".format(type(result)))
if len(result) != len(a[0]):
raise RuntimeError("Result vector from pandas_udf was not the required length: "
"expected %d, got %d" % (len(a[0]), len(result)))
return result
return lambda *a: (verify_result_length(*a), arrow_return_type)
def wrap_pandas_group_map_udf(f, return_type):
def wrapped(*series):
import pandas as pd
result = f(pd.concat(series, axis=1))
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
arrow_return_types = (to_arrow_type(field.dataType) for field in return_type)
return [(result[result.columns[i]], arrow_type)
for i, arrow_type in enumerate(arrow_return_types)]
return wrapped
def read_single_udf(pickleSer, infile, eval_type):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
row_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if row_func is None:
row_func = f
else:
row_func = chain(row_func, f)
# the last returnType will be the return type of UDF
if eval_type == PythonEvalType.SQL_PANDAS_SCALAR_UDF:
return arg_offsets, wrap_pandas_scalar_udf(row_func, return_type)
elif eval_type == PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF:
return arg_offsets, wrap_pandas_group_map_udf(row_func, return_type)
else:
return arg_offsets, wrap_udf(row_func, return_type)
def read_udfs(pickleSer, infile, eval_type):
num_udfs = read_int(infile)
udfs = {}
call_udf = []
for i in range(num_udfs):
arg_offsets, udf = read_single_udf(pickleSer, infile, eval_type)
udfs['f%d' % i] = udf
args = ["a[%d]" % o for o in arg_offsets]
call_udf.append("f%d(%s)" % (i, ", ".join(args)))
# Create function like this:
# lambda a: (f0(a0), f1(a1, a2), f2(a3))
# In the special case of a single UDF this will return a single result rather
# than a tuple of results; this is the format that the JVM side expects.
mapper_str = "lambda a: (%s)" % (", ".join(call_udf))
mapper = eval(mapper_str, udfs)
func = lambda _, it: map(mapper, it)
if eval_type == PythonEvalType.SQL_PANDAS_SCALAR_UDF \
or eval_type == PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF:
timezone = utf8_deserializer.loads(infile)
ser = ArrowStreamPandasSerializer(timezone)
else:
ser = BatchedSerializer(PickleSerializer(), 100)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions." +
"Please check environment variables PYSPARK_PYTHON and " +
"PYSPARK_DRIVER_PYTHON are correctly set.") %
("%d.%d" % sys.version_info[:2], version))
# initialize global state
taskContext = TaskContext._getOrCreate()
taskContext._stageId = read_int(infile)
taskContext._partitionId = read_int(infile)
taskContext._attemptNumber = read_int(infile)
taskContext._taskAttemptId = read_long(infile)
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
# fetch names and values of broadcast variables
num_broadcast_variables = read_int(infile)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = - bid - 1
_broadcastRegistry.pop(bid)
_accumulatorRegistry.clear()
eval_type = read_int(infile)
if eval_type == PythonEvalType.NON_UDF:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
serializer.dump_stream(func(split_index, iterator), outfile)
if profiler:
profiler.profile(process)
else:
process()
except Exception:
try:
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(traceback.format_exc().encode("utf-8"), outfile)
except IOError:
# JVM close the socket
pass
except Exception:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
exit(-1)
if __name__ == '__main__':
# Read a local port to connect to from stdin
java_port = int(sys.stdin.readline())
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", java_port))
sock_file = sock.makefile("rwb", 65536)
main(sock_file, sock_file)
| apache-2.0 |
bhzunami/Immo | immo/scikit/data_analysis.py | 1 | 18933 | import matplotlib
matplotlib.use('agg')
"""
Data Analysis
Load data from database or a csv File
Feature Selection: (http://machinelearningmastery.com/feature-selection-machine-learning-python/)
Feature selection is a important step to:
- reduce overfitting
- imporves accuracy
- reduces Training Time
"""
import os
import pdb
import json
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, load_only, Load
from models import Advertisement, Municipality, ObjectType
# Set precision to 3
np.set_printoptions(precision=3)
class DataAnalysis():
def __init__(self, file='./homegate.csv'):
self.synopsis = json.load(open('synopsis.json'))
if os.path.isfile(file):
print("Use file")
ads = pd.read_csv(file, index_col=0, engine='c')
else:
try:
engine = create_engine(os.environ.get('DATABASE_URL', None))
Session = sessionmaker(bind=engine)
self.session = Session()
ads = self.load_dataset_from_database()
ads.to_csv(file, header=True, encoding='utf-8')
except AttributeError as e:
raise Exception("If you want to load data from the database you have to export the DATABASE_URL environment")
self.ads = ads
def load_dataset_from_database(self):
""" load data from database
"""
statement = self.session.query(Advertisement, Municipality, ObjectType).join(Municipality, ObjectType).options(
Load(Advertisement).load_only(
"price_brutto",
"crawler",
"num_floors",
"living_area",
"floor",
"num_rooms",
"build_year",
"last_renovation_year",
"cubature",
"room_height",
"effective_area",
"longitude",
"latitude",
"noise_level",
"plot_area",
"tags"),
Load(Municipality).load_only(
"name",
"canton_id",
"district_id",
"mountain_region_id",
"language_region_id",
"job_market_region_id",
"agglomeration_id",
"metropole_region_id",
"tourism_region_id",
"is_town",
"noise_level",
"urban_character_id",
"steuerfuss_gde",
"steuerfuss_kanton",
"degurba_id",
"planning_region_id",
"ase",
"greater_region_id",
"ms_region_id",
"municipal_size_class_id",
"agglomeration_size_class_id",
"municipal_type22_id",
"municipal_type9_id"),
Load(ObjectType).load_only("name", "grouping")
).with_labels().statement
data = pd.read_sql_query(statement, self.session.bind)
data.drop(['advertisements_id', 'municipalities_id', 'object_types_id'], axis=1, inplace=True)
# Rename
return data.rename(columns={'advertisements_price_brutto': 'price',
'advertisements_crawler': 'crawler',
'advertisements_living_area': 'living_area',
'advertisements_floor': 'floor',
'advertisements_num_rooms': 'num_rooms',
'advertisements_num_floors': 'num_floors',
'advertisements_build_year': 'build_year',
'advertisements_last_renovation_year': 'last_renovation_year',
'advertisements_cubature': 'cubature',
'advertisements_room_height': 'room_height',
'advertisements_effective_area': 'effective_area',
'advertisements_plot_area': 'plot_area',
'advertisements_longitude': 'longitude',
'advertisements_latitude': 'latitude',
'advertisements_noise_level': 'noise_level',
'advertisements_tags': 'tags',
'municipalities_name': 'municipality',
'municipalities_canton_id': 'canton_id',
'municipalities_district_id': 'district_id',
'municipalities_planning_region_id': 'planning_region_id',
'municipalities_mountain_region_id': 'mountain_region_id',
'municipalities_ase': 'ase',
'municipalities_greater_region_id': 'greater_region_id',
'municipalities_language_region_id': 'language_region_id',
'municipalities_ms_region_id': 'ms_region_id',
'municipalities_job_market_region_id': 'job_market_region_id',
'municipalities_agglomeration_id': 'agglomeration_id',
'municipalities_metropole_region_id': 'metropole_region_id',
'municipalities_tourism_region_id': 'tourism_region_id',
'municipalities_municipal_size_class_id': 'municipal_size_class_id',
'municipalities_urban_character_id': 'urban_character_id',
'municipalities_agglomeration_size_class_id': 'agglomeration_size_class_id',
'municipalities_is_town': 'is_town',
'municipalities_degurba_id': 'degurba_id',
'municipalities_municipal_type22_id': 'municipal_type22_id',
'municipalities_municipal_type9_id': 'municipal_type9_id',
'municipalities_noise_level': 'm_noise_level',
'municipalities_steuerfuss_gde': 'steuerfuss_gde',
'municipalities_steuerfuss_kanton': 'steuerfuss_kanton',
'object_types_name': 'otype',
'object_types_grouping': 'ogroup'})
# Cleanup the datakeys
def simple_stats(self):
print("We have total {} values".format(len(self.ads)))
print("{:25} | {:6} | {:6}".format("Feature",
"NaN-Values",
"usable Values"))
print("-"*70)
for i, key in enumerate(self.ads.keys()):
if key == 'id' or key == 'Unnamed': # Keys from pandas we do not want
continue
nan_values = self.ads[key].isnull().sum()
useful_values = len(self.ads) - nan_values
print("{:25} {:6} ({:02.2f}%) | {:6} ({:02.0f}%)".format(key,
nan_values,
(nan_values/len(self.ads))*100,
useful_values,
(useful_values/len(self.ads))*100))
# Missing data
# Calculate percent of missing data
missing_data = (self.ads.isnull().sum() / len(self.ads)) * 100
# Remove itmes we have 100% and sort
missing_data = missing_data.drop(missing_data[missing_data == 0].index).sort_values(ascending=False)
b = sns.barplot(x=missing_data.index, y=missing_data)
plt.xlabel('Features')
plt.ylabel('% von fehlenden Werten')
plt.title('Fehlende Features in %')
plt.xticks(rotation='90')
plt.tight_layout()
for text in b.get_xticklabels():
text.set_text(text.get_text().replace("_", " ").title())
plt.savefig("images/analysis/missing_values.png", dpi=250)
plt.clf()
plt.close()
def clean_dataset(self):
print("="*70)
print("Dataset preparation:")
print("-"*70)
# Remove elements with no price
ads = self.ads.dropna(subset=['price'])
removed_ads_with_missing_price = len(self.ads) - len(ads)
print("Removed {} ads because we do not have a price.".format(removed_ads_with_missing_price))
# Cleanup some outliers
ads = ads.drop(ads[ads['num_floors'] > 20].index)
ads = ads.drop(ads[ads['price'] > 20000000].index)
ads = ads.drop(ads[ads['price'] < 10].index)
ads = ads.drop(ads[ads['living_area'] > 5000].index)
ads = ads.drop(ads[ads['num_rooms'] > 20].index)
ads = ads.drop(ads[ads['build_year'] < 1200].index)
ads = ads.drop(ads[ads['build_year'] > 2050].index)
ads = ads.drop(ads[ads['last_renovation_year'] < 1200].index)
ads = ads.drop(ads[ads['last_renovation_year'] > 2050].index)
ads = ads.drop(ads[ads['cubature'] > 20000].index)
ads = ads.drop(ads[ads['floor'] > 30].index)
# Remove to lower values
# ads = ads.drop(ads[ads['living_area'] < 20].index)
# ads = ads.drop(ads[ads['cubature'] < 20].index)
# ads = ads.drop(ads[ads['num_rooms'] < 1].index)
print("Removed {} outliers. Dataset size: {}".format(len(self.ads) - len(ads) - removed_ads_with_missing_price, len(ads)))
#print("Describe: \n{}".format(ads.describe()))
print("Nummerical features:")
print(ads.num_rooms.describe())
print(ads.living_area.describe())
print(ads.build_year.describe())
print(ads.num_floors.describe())
print(ads.cubature.describe())
print(ads.floor.describe())
print(ads.noise_level.describe())
print(ads.last_renovation_year.describe())
self.ads = ads
def plot_numerical_values(self):
ax = plt.axes()
ax.set_title("Verteilung des Kaufpreises")
sns.distplot(self.ads['price'], kde=True, bins=50, ax=ax)
ax.set_xlabel("Kaufpreis CHF")
plt.savefig("images/analysis/Verteilung_des_kauf_preises.png", dpi=250)
print("Distplot - OK")
plt.clf()
plt.close()
ax = plt.axes()
ax.set_title("Verteilung des Kaufpreises")
ax.hist(self.ads['price'], bins=100)
ax.set_xlabel("Kaufpreis CHF")
ax.set_ylabel("Anzahl Inserate")
plt.savefig("images/analysis/bar_des_kauf_preises.png", dpi=250)
print("bar - OK")
plt.clf()
plt.close()
ax = plt.axes()
ax.set_title("Verteilung des Kaufpreises")
ax.hist(self.ads.drop(self.ads[self.ads['price'] > 3500000].index).price, bins=100)
ax.set_xlabel("Kaufpreis CHF")
ax.set_ylabel("Anzahl Inserate")
plt.savefig("images/analysis/bar_des_kauf_preises_cut.png", dpi=250)
print("bar - OK")
plt.clf()
plt.close()
pdb.set_trace()
ax = plt.axes()
ax.set_title("Verteilung des Kaufpreises mit log")
sns.distplot(np.log1p(self.ads['price']), kde=True, bins=100, hist_kws={'alpha': 0.6}, ax=ax)
ax.set_xlabel("Kaufpreis CHF (log)")
plt.savefig("images/analysis/Verteilung_des_kauf_preises_log.png", dpi=250)
print("Distplot - OK")
plt.clf()
plt.close()
for f, name in [('num_rooms', 'Anzahl Zimmer'),
('living_area', 'Fläche [m^2]'),
('noise_level', 'Lärmbelastung')]:
ax = plt.axes()
ax.set_title("Verteilung der {}".format(name))
sns.distplot(self.ads[f].dropna(), kde=False, bins=100, hist_kws={'alpha': 0.6}, ax=ax)
ax.set_xlabel("{}".format(name))
plt.savefig("images/analysis/Verteilung_{}.png".format(f), dpi=250)
print("Distplot - OK")
plt.clf()
plt.close()
# Heatmap of features:
corr = self.ads.select_dtypes(include = ['float64', 'int64']).corr()
plt.figure(figsize=(12, 12))
hm = sns.heatmap(corr, vmin=-1, vmax=1, square=True)
for text in hm.get_xticklabels():
text.set_text(text.get_text().replace("_", " ").replace("id", "").title())
hm.set_xticklabels(hm.get_xticklabels(), rotation=90)
hm.set_yticklabels(reversed(hm.get_xticklabels()), rotation=0)
hm.set_title("Heatmap aller Features", fontsize=20)
plt.savefig("images/analysis/Heatmap_all.png", dpi=250)
print("Heatmap all - OK")
plt.clf()
plt.close()
corr = self.ads.select_dtypes(include = ['float64']).corr()
plt.figure(figsize=(12, 12))
hm = sns.heatmap(corr, vmin=-1, vmax=1, square=True)
for text in hm.get_xticklabels():
text.set_text(text.get_text().replace("_", " ").replace("id", "").title())
hm.set_xticklabels(hm.get_xticklabels(), rotation=90)
hm.set_yticklabels(reversed(hm.get_xticklabels()), rotation=0)
hm.set_title("Heatmap numerischer Features", fontsize=20)
plt.savefig("images/analysis/Heatmap_numerical.png", dpi=250)
print("Heatmap Numerical - OK")
plt.clf()
plt.close()
cor_dict = corr['price'].to_dict()
del cor_dict['price']
print("List the numerical features decendingly by their correlation with Sale Price:\n")
for ele in sorted(cor_dict.items(), key = lambda x: -abs(x[1])):
print("{0}: \t{1}".format(*ele))
# Now all features compared to price
plt.figure(1)
f, ax = plt.subplots(4, 2, figsize=(10, 9))
price = self.ads.price.values
ax[0, 0].scatter(self.ads.num_rooms.values, price)
ax[0, 0].set_title('Anzahl Zimmer')
ax[0, 1].scatter(self.ads.living_area.values, price)
ax[0, 1].set_title('Wohnfläche [m²]')
ax[1, 0].scatter(self.ads.build_year.values, price)
ax[1, 0].set_title('Baujahr')
ax[1, 0].set_ylabel('Preis')
ax[1, 1].scatter(self.ads.num_floors.values, price)
ax[1, 1].set_title('Anzahl Stockwerke')
ax[2, 0].scatter(self.ads.cubature.values, price)
ax[2, 0].set_title('Cubature')
ax[2, 1].scatter(self.ads.floor.values, price)
ax[2, 1].set_title('Stockwerk')
ax[3, 0].scatter(self.ads.noise_level.values, price)
ax[3, 0].set_title('Lärmbelastung')
ax[3, 1].scatter(self.ads.last_renovation_year.values, price)
ax[3, 1].set_title('Letzte Renovaton')
plt.tight_layout()
plt.savefig("images/analysis/Vergleich_zum_preis.png", dpi=250)
print("Vergleich - OK")
plt.clf()
plt.close()
fig = plt.figure()
from scipy import stats
res = stats.probplot(self.ads['price'], plot=plt)
plt.savefig("images/analysis/skewness.png", dpi=250)
print("skewness - OK")
plt.clf()
plt.close()
fig = plt.figure()
res = stats.probplot(np.log1p(self.ads['price']), plot=plt)
plt.savefig("images/analysis/log_skewness.png", dpi=250)
print("Log skewness - OK")
plt.clf()
plt.close()
def plot_categorical_features(self):
ax = plt.axes()
b = sns.boxplot(x='canton_id', y='price', data=self.ads, ax=ax)
b.set_xticklabels(self.synopsis['CANTON_ID'], rotation=90)
ax.set_xlabel("")
ax.set_ylabel("Kaufpreis CHF")
ax.set_title("Kaufpreise auf Kantone")
plt.tight_layout()
plt.savefig("images/analysis/boxPlot_cantons.png", dpi=250)
print("boxplot cantons - OK")
plt.clf()
plt.close()
ax = plt.axes()
b = sns.barplot(x='canton_id', y='price', data=self.ads, ax=ax)
b.set_xticklabels(self.synopsis['CANTON_ID'], rotation=90)
ax.set_xlabel("")
ax.set_ylabel("Kaufpreis CHF (Durchschnitt)")
ax.set_title("Kaufpreise auf Kantone")
plt.tight_layout()
plt.savefig("images/analysis/barplot_canton.png", dpi=250)
print("barplot canton - OK")
plt.clf()
plt.close()
ax = plt.axes()
b = sns.barplot(x='otype', y='price', data=self.ads, ax=ax)
b.set_xticklabels(b.get_xticklabels(), rotation=90)
plt.tight_layout()
ax.set_xlabel("")
ax.set_ylabel("Kaufpreis CHF (Durchschnitt)")
plt.savefig("images/analysis/barplot_gruppen.png", dpi=250)
print("barplot Gruppen - OK")
plt.clf()
plt.close()
for key in ['TOURISM_REGION_ID', 'METROPOLE_REGION_ID', 'JOB_MARKET_REGION_ID',
'MOUNTAIN_REGION_ID', 'LANGUAGE_REGION_ID', 'MUNICIPAL_SIZE_CLASS_ID',
'GREATER_REGION_ID', 'AGGLOMERATION_SIZE_CLASS_ID',
'IS_TOWN', 'DEGURBA_ID']:
ax = plt.axes()
b = sns.barplot(x=key.lower(), y='price', data=self.ads, ax=ax)
b.set_xticklabels(self.synopsis[key], rotation=90)
ax.set_xlabel("")
ax.set_ylabel("Kaufpreis CHF (Durchschnitt)")
ax.set_title(key.replace('_', ' ').replace('ID', '').title())
plt.tight_layout()
plt.savefig("images/analysis/barplot_{}.png".format(key.lower()), dpi=250)
print("barplot {} - OK".format(key.lower()))
plt.clf()
plt.close()
# Boxplot only have data where price is lower 5 millions (Graphical better)
ax = plt.axes()
b = sns.boxplot(x=key.lower(), y='price',
data=self.ads[self.ads.price < 5000000],
ax=ax)
b.set_xticklabels(self.synopsis[key], rotation=90)
ax.set_xlabel("")
ax.set_ylabel("Kaufpreis CHF (Durchschnitt)")
ax.set_title(key.replace('_', ' ').replace('ID', '').title())
plt.tight_layout()
plt.savefig("images/analysis/boxplot_{}.png".format(key.lower()), dpi=250)
print("boxplot {} - OK".format(key.lower()))
plt.clf()
plt.close()
def main():
data_analysis = DataAnalysis(file='advertisements.csv')
data_analysis.simple_stats()
data_analysis.clean_dataset()
data_analysis.plot_numerical_values()
data_analysis.plot_categorical_features()
if __name__ == "__main__":
main()
| mit |
VasLem/KinectPainting | action_recognition_alg.py | 1 | 86242 |
import sys
import os
import warnings
import logging
import glob
from math import pi
import numpy as np
from numpy.linalg import pinv
import cv2
import class_objects as co
import sparse_coding as sc
import hand_segmentation_alg as hsa
import hist4d as h4d
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.widgets import Button
import cPickle as pickle
import time
# Kinect Intrinsics
PRIM_X = 256.92
PRIM_Y = 204.67
FLNT = 365.98
# Senz3d Intrinsics
'''
PRIM_X = 317.37514566554989
PRIM_Y = 246.61273826510859
FLNT = 595.333159044648 / (30.48 / 1000.0)
'''
def initialize_logger(logger):
if not getattr(logger, 'handler_set', None):
CH = logging.StreamHandler()
CH.setFormatter(logging.Formatter(
'%(name)s-%(funcName)s()(%(lineno)s)-%(levelname)s:%(message)s'))
logger.addHandler(CH)
logger.handler_set = True
logger.propagate = False
def checktypes(objects, classes):
'''
Checks type of input objects and prints caller's doc string
and exits if there is a problem
'''
frame = sys._getframe(1)
try:
if not all([isinstance(obj, instance) for
obj, instance in zip(objects, classes)]):
raise TypeError(getattr(frame.f_locals['self'].__class__,
frame.f_code.co_name).__doc__)
finally:
del frame
def timeit(func):
'''
Decorator to time extraction
'''
def wrapper(self,*arg, **kw):
t1 = time.time()
res = func(self,*arg, **kw)
t2 = time.time()
self.time.append(t2-t1)
del self.time[:-5000]
return res
return wrapper
def find_nonzero(arr):
'''
Finds nonzero elements positions
'''
return np.fliplr(cv2.findNonZero(arr).squeeze())
def prepare_dexter_im(img):
'''
Compute masks for images
'''
binmask = img < 6000
contours = cv2.findContours(
(binmask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
contours_area = [cv2.contourArea(contour) for contour in contours]
hand_contour = contours[np.argmax(contours_area)].squeeze()
hand_patch = img[np.min(hand_contour[:, 1]):np.max(hand_contour[:, 1]),
np.min(hand_contour[:, 0]):np.max(hand_contour[:, 0])]
hand_patch_max = np.max(hand_patch)
hand_patch[hand_patch == hand_patch_max] = 0
img[img == hand_patch_max] = 0
med_filt = np.median(hand_patch[hand_patch != 0])
thres = np.min(img) + 0.1 * (np.max(img) - np.min(img))
binmask[np.abs(img - med_filt) > thres] = False
hand_patch[np.abs(hand_patch - med_filt) > thres] = 0
hand_patch_pos = np.array(
[np.min(hand_contour[:, 1]), np.min(hand_contour[:, 0])])
return img * binmask,\
hand_patch, hand_patch_pos
def prepare_im(img, contour=None, square=False):
'''
<square> for display reasons, it returns a square patch of the hand, with
the hand centered inside.
'''
if img is None:
return None, None, None
if contour is None:
contours = cv2.findContours(
(img).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
contours_area = [cv2.contourArea(contour) for contour in contours]
try:
contour = contours[np.argmax(contours_area)].squeeze()
except ValueError:
return None, None, None
hand_contour = contour.squeeze()
if hand_contour.size == 2:
return None, None, None
if square:
edge_size = max(np.max(hand_contour[:, 1]) - np.min(hand_contour[:, 1]),
np.max(hand_contour[:, 0]) - np.min(hand_contour[:, 0]))
center = np.mean(hand_contour, axis=0).astype(int)
hand_patch = img[center[1] - edge_size / 2:
center[1] + edge_size / 2,
center[0] - edge_size / 2:
center[0] + edge_size / 2
]
else:
hand_patch = img[np.min(hand_contour[:, 1]):np.max(hand_contour[:, 1]),
np.min(hand_contour[:, 0]):np.max(hand_contour[:, 0])]
hand_patch_pos = np.array(
[np.min(hand_contour[:, 1]), np.min(hand_contour[:, 0])])
return hand_patch, hand_patch_pos, contour
class SpaceHistogram(object):
'''
Create Histograms for 3DHOG and GHOF
'''
def __init__(self):
self.bin_size = None
self.range = None
def hist_data(self, sample):
'''
Compute normalized N-D histograms
'''
hist, edges = np.histogramdd(sample, self.bin_size, range=self.range)
return hist, edges
class BufferOperations(object):
def __init__(self, parameters, reset_time=True):
self.logger = logging.getLogger('BufferOperations')
initialize_logger(self.logger)
self.parameters = parameters
self.buffer = []
self.depth = []
self.testing = parameters['testing']
self.action_type = parameters['action_type']
self.samples_indices = []
self.buffer_start_inds = []
self.buffer_end_inds = []
if not self.action_type == 'Passive':
self.ptpca = parameters['PTPCA']
self.ptpca_components = parameters['PTPCA_params'][
'PTPCA_components']
self.bbuffer = [[] for i in range(len(parameters['descriptors']))]
if not self.action_type == 'Passive':
self.buffer_size = parameters['dynamic_params']['buffer_size']
try:
self.buffer_confidence_tol = parameters['dynamic_params'][
'buffer_confidence_tol']
self.ptpca = parameters['PTPCA']
self.ptpca_components = parameters['PTPCA_params'][
'PTPCA_components']
except (KeyError, IndexError, TypeError):
self.buffer_confidence_tol = None
self.pca_features = []
else:
self.buffer_size = 1
self.sync = []
self.frames_inds = []
self.samples_inds = []
self.buffer_components = []
self.depth_components = []
self.real_samples_inds = []
if reset_time:
self.time = []
def reset(self, reset_time=False):
self.__init__(self.parameters, reset_time=reset_time)
def check_buffer_integrity(self, buffer):
check_sam = True
check_cont = True
check_len = len(buffer) == self.buffer_size
if check_len:
if not self.action_type == 'Passive':
check_cont = np.all(np.abs(np.diff(self.frames_inds[-self.buffer_size:])) <=
self.buffer_size * self.buffer_confidence_tol)
# check if buffer frames belong to the same sample, in case of
# training
check_sam = self.testing or len(np.unique(
self.samples_inds[-self.buffer_size:])) == 1
else:
check_cont = True
check_sam = True
check_len = True
return check_len and check_cont and check_sam
@timeit
def perform_post_time_pca(self, inp):
reshaped = False
if self.buffer_size == 1:
return
if np.shape(inp)[0] == 1 or len(np.shape(inp))==1:
reshaped = True
inp = np.reshape(inp, (self.buffer_size, -1))
mean, inp = cv2.PCACompute(
np.array(inp),
np.array([]),
maxComponents=self.ptpca_components)
inp = (np.array(inp) + mean)
if reshaped:
return inp.ravel()
return inp
def update_buffer_info(self, sync, samples_index=0,
samples=None, depth=None):
self.frames_inds.append(sync)
self.samples_inds.append(samples_index)
if samples is not None:
self.buffer_components.append(samples)
del self.buffer_components[:-self.buffer_size]
if depth is not None:
self.depth_components.append(depth)
del self.depth_components[:-self.buffer_size]
def add_buffer(self, buffer=None, depth=None, sample_count=None,
already_checked=False):
'''
<buffer> should have always the same size.
<self.bbuffer> is a list of buffers. It can have a size limit, after which it
acts as a buffer (useful for shifting window
operations (filtering etc.))
'''
# check buffer contiguousness
if buffer is None:
buffer = self.buffer_components
if depth is None:
fmask = np.isfinite(self.depth_components)
if np.sum(fmask):
depth = np.mean(np.array(self.depth_components)[fmask])
if not already_checked:
check = self.check_buffer_integrity(buffer[-self.buffer_size:])
else:
check = True
if not self.parameters['testing_params']['online']:
self.real_samples_inds += [-1] * (self.frames_inds[-1] + 1 -
len(self.buffer))
self.depth += [None] * (self.frames_inds[-1] + 1
- len(self.buffer))
self.buffer += [None] * (self.frames_inds[-1] + 1
- len(self.buffer))
if check:
self.buffer_start_inds.append(self.frames_inds[-self.buffer_size])
self.buffer_end_inds.append(self.frames_inds[-1])
if not self.parameters['testing_params']['online']:
self.buffer[self.frames_inds[-1]] = np.array(
buffer)
self.depth[self.frames_inds[-1]] = depth
else:
self.buffer = np.array(buffer)
self.depth = depth
if not self.parameters['testing_params']['online']:
self.real_samples_inds[self.frames_inds[-1]] = (np.unique(self.samples_inds[
-self.buffer_size:])[0])
else:
if self.parameters['testing_params']['online']:
self.buffer = None
self.depth = None
def extract_buffer_list(self):
'''
Returns a 2d numpy array, which has as first dimension the number of
saved features sets inside <self.bbuffer>,
as second dimension a flattened buffer. In case it is online, the first
dimension is 1. In case there are None samples inside, those are turned
to None arrays.
'''
if self.parameters['testing_params']['online']:
if self.bbuffer is None:
return None
else:
buffer_len = 0
for _buffer in self.buffer:
if _buffer is not None:
buffer_len = np.size(_buffer)
break
if not buffer_len:
self.logger.debug('No valid buffer')
return None
npbuffer = np.zeros((len(self.buffer),buffer_len))
for buffer_count in range(len(self.buffer)):
if self.buffer[buffer_count] is None:
self.buffer[buffer_count] = np.zeros(buffer_len)
self.buffer[buffer_count][:] = np.nan
npbuffer[buffer_count, ...] =\
np.array(self.buffer[buffer_count]).T.ravel()
return npbuffer, self.real_samples_inds, self.depth
class Action(object):
'''
Class to hold an action
'''
def __init__(self, parameters, name, coders=None):
self.name = name
self.parameters = parameters
self.features = []
self.sync = []
self.frames_inds = []
self.samples_inds = []
self.length = 0
self.start_inds = []
self.end_inds = []
self.real_data = []
class Actions(object):
'''
Class to hold multiple actions
'''
def __init__(self, parameters, coders=None, feat_filename=None):
self.logger = logging.getLogger(self.__class__.__name__)
initialize_logger(self.logger)
self.slflogger = logging.getLogger('save_load_features')
FH = logging.FileHandler('save_load_features.log', mode='w')
FH.setFormatter(logging.Formatter(
'%(asctime)s (%(lineno)s): %(message)s',
"%Y-%m-%d %H:%M:%S"))
self.slflogger.addHandler(FH)
self.slflogger.setLevel(logging.INFO)
self.parameters = parameters
self.sparsecoded = parameters['sparsecoded']
self.available_descriptors = {'3DHOF': Descriptor3DHOF,
'ZHOF': DescriptorZHOF,
'GHOG': DescriptorGHOG,
'3DXYPCA': Descriptor3DXYPCA}
self.actions = []
self.names = []
self.coders = coders
if coders is None:
self.coders = [None] * len(self.parameters['descriptors'])
self.save_path = (os.getcwd() +
os.sep + 'saved_actions.pkl')
self.features_extract = None
self.preproc_time = []
self.features_db = None
self.feat_filename = feat_filename
self.candid_d_actions = None
self.valid_feats = None
self.all_data = [None] * len(self.parameters['descriptors'])
self.name = None
self.frames_preproc = None
self.descriptors = {feature:None for feature in
self.parameters['descriptors']}
self.descriptors_id = [None] * len(self.parameters['descriptors'])
self.coders_info = [None] * len(self.parameters['descriptors'])
self.buffer_class= ([BufferOperations(self.parameters)] *
len(self.parameters['descriptors']))
def save_action_features_to_mem(self, data, filename=None,
action_name=None):
'''
features_db has tree structure, with the following order:
features type->list of instances dicts ->[params which are used to
identify the features, data of each instance->actions]
This order allows to search only once for each descriptor and get all
actions corresponding to a matching instance, as it is assumed that
descriptors are fewer than actions.
<data> is a list of length same as the descriptors number
'''
if filename is None:
if self.feat_filename is None:
return
else:
filename = self.feat_filename
if action_name is None:
action_name = self.name
for dcount, descriptor in enumerate(
self.parameters['descriptors']):
if self.candid_d_actions[dcount] is None:
self.candid_d_actions[dcount] = {}
self.candid_d_actions[dcount][action_name] = data[dcount]
co.file_oper.save_labeled_data([descriptor,
str(co.dict_oper.
create_sorted_dict_view(
self.parameters[
'features_params'][
descriptor]))],
self.candid_d_actions[dcount],
filename, fold_lev=1)
def load_action_features_from_mem(self, filename=None):
'''
features_db has tree structure, with the following order:
features type->list of instances dicts ->[params which are used to
identify the features, data of each instance->actions]
This order allows to search only once for each descriptor and get all
actions corresponding to a matching instance, as it is assumed that
descriptors are fewer than actions
'''
features_to_extract = self.parameters['descriptors'][:]
data = [None] * len(features_to_extract)
if self.candid_d_actions is None:
self.candid_d_actions = []
if filename is None:
if self.feat_filename is None:
return features_to_extract, data
else:
filename = self.feat_filename
for descriptor in self.parameters['descriptors']:
self.candid_d_actions.append(
co.file_oper.load_labeled_data([descriptor,
str(co.dict_oper.create_sorted_dict_view(
self.parameters[
'features_params'][
descriptor]))],
filename, fold_lev=1))
'''
Result is <candid_d_actions>, a list which holds matching
instances of actions for each descriptor, or None if not found.
'''
for dcount, instance in enumerate(self.candid_d_actions):
self.slflogger.info('Descriptor: ' + self.parameters['descriptors'][
dcount])
if instance is not None:
self.slflogger.info('Finding action \'' + self.name +
'\' inside matching instance')
if self.name in instance and np.array(
instance[self.name][0]).size > 0:
self.slflogger.info('Action Found')
data[dcount] = instance[self.name]
features_to_extract.remove(self.parameters['descriptors']
[dcount])
else:
self.slflogger.info('Action not Found')
else:
self.slflogger.info('No matching instance exists')
return features_to_extract, data
def train_sparse_dictionary(self):
'''
Train missing sparse dictionaries. add_action should have been executed
first
'''
for count, (data, info) in enumerate(
zip(self.all_data, self.coders_info)):
if not self.coders[count]:
if data is not None:
coder = sc.SparseCoding(
sparse_dim_rat=self.parameters['features_params'][
self.parameters['descriptors'][count]][
'sparse_params']['_dim_rat'],
name=self.parameters['descriptors'][count])
finite_samples = np.prod(np.isfinite(data),
axis=1).astype(bool)
coder.train_sparse_dictionary(data[finite_samples,:])
co.file_oper.save_labeled_data(info, coder)
else:
raise Exception('No data available, run add_action first')
self.coders[count] = coder
co.file_oper.save_labeled_data(self.coders_info[count], self.coders[count])
def load_sparse_coder(self, count):
self.coders_info[count] = (['Sparse Coders']+
[self.parameters['sparsecoded']]+
[str(self.parameters['descriptors'][
count])]+
[str(co.dict_oper.create_sorted_dict_view(
self.parameters['coders_params'][
str(self.parameters['descriptors'][count])]))])
if self.coders[count] is None:
self.coders[count] = co.file_oper.load_labeled_data(
self.coders_info[count])
return self.coders_info[count]
def retrieve_descriptor_possible_ids(self, count, assume_existence=False):
descriptor = self.parameters['descriptors'][count]
file_ids = [co.dict_oper.create_sorted_dict_view(
{'Descriptor':descriptor}),
co.dict_oper.create_sorted_dict_view(
{'ActionType':self.parameters['action_type']}),
co.dict_oper.create_sorted_dict_view(
{'DescriptorParams':co.dict_oper.create_sorted_dict_view(
self.parameters['features_params'][descriptor]['params'])})]
ids = ['Features']
if self.sparsecoded:
self.load_sparse_coder(count)
if (self.parameters['sparsecoded'] == 'Features'
and (self.coders[count] is not None or assume_existence)):
file_ids.append(co.dict_oper.create_sorted_dict_view(
{'SparseFeaturesParams':
co.dict_oper.create_sorted_dict_view(
self.parameters[
'features_params'][descriptor]['sparse_params'])}))
ids.append('Sparse Features')
file_ids.append(co.dict_oper.create_sorted_dict_view(
{'BufferParams':
co.dict_oper.create_sorted_dict_view(
self.parameters['dynamic_params'])}))
ids.append('Buffered Features')
if self.parameters['action_type']!='Passive':
if (self.parameters['sparsecoded'] == 'Buffer'
and (self.coders[count] is not None or assume_existence)):
file_ids.append(co.dict_oper.create_sorted_dict_view(
{'SparseBufferParams':
co.dict_oper.create_sorted_dict_view(
self.parameters[
'features_params'][descriptor]['sparse_params'])}))
ids.append('Sparse Buffers')
if not (self.parameters['sparsecoded'] == 'Buffer'
and self.coders[count] is None) or assume_existence:
if self.parameters['PTPCA']:
file_ids.append(co.dict_oper.create_sorted_dict_view(
{'PTPCAParams':
co.dict_oper.create_sorted_dict_view(
self.parameters[
'PTPCA_params'])}))
ids.append('PTPCA')
return ids, file_ids
def add_action(self, data=None,
mv_obj_fold_name=None,
hnd_mk_fold_name=None,
masks_needed=True,
use_dexter=False,
visualize_=False,
isderotated=False,
action_type='Dynamic',
max_act_samples=None,
fss_max_iter=None,
derot_centers=None,
derot_angles=None,
name=None,
feature_extraction_method=None,
save=True,
load=True,
feat_filename=None,
calc_mean_depths=False,
to_visualize=[],
n_vis_frames=9,
exit_after_visualization=False,
offline_vis=False):
'''
parameters=dictionary having at least a 'descriptors' key, which holds
a sublist of ['3DXYPCA', 'GHOG', '3DHOF', 'ZHOF']. It can have a
'features_params' key, which holds specific parameters for the
features to be extracted.
features_extract= FeatureExtraction Class
data= (Directory with depth frames) OR (list of depth frames)
use_dexter= True if Dexter 1 TOF Dataset is used
visualize= True to visualize features extracted from frames
'''
self.name = name
if name is None:
self.name = os.path.basename(data)
loaded_data = [[] for i in range(len(self.parameters['descriptors']))]
readimagedata = False
features = [None] * len(self.parameters['descriptors'])
buffers = [None] * len(self.parameters['descriptors'])
samples_indices = [None] * len(self.parameters['descriptors'])
median_depth = [None] * len(self.parameters['descriptors'])
times = {}
valid = False
redo = False
if 'raw' in to_visualize:
load = False
while not valid:
for count, descriptor in enumerate(self.parameters['descriptors']):
nloaded_ids = {}
loaded_ids = {}
ids, file_ids = self.retrieve_descriptor_possible_ids(count)
try_ids = ids[:]
for try_count in range(len(try_ids)):
loaded_data = co.file_oper.load_labeled_data(
[try_ids[-1]] + file_ids + [self.name])
if loaded_data is not None and not redo and load:
loaded_ids[try_ids[-1]] = file_ids[:]
break
else:
nloaded_ids[try_ids[-1]] = file_ids[:]
try_ids = try_ids[:-1]
file_ids = file_ids[:-1]
for _id in ids:
try:
nloaded_file_id = nloaded_ids[_id]
nloaded_id = _id
except:
continue
if nloaded_id == 'Features':
if not readimagedata:
(imgs, masks, sync, angles,
centers,
samples_inds) = co.imfold_oper.load_frames_data(
data,mv_obj_fold_name,
hnd_mk_fold_name, masks_needed,
derot_centers,derot_angles)
if 'raw' in to_visualize:
montage = co.draw_oper.create_montage(imgs[:],
max_ims=n_vis_frames,
draw_num=False)
fig = plt.figure()
tmp_axes = fig.add_subplot(111)
tmp_axes.imshow(montage[:,:,:-1])
plt.axis('off')
fig.savefig('frames_sample.pdf',
bbox_inches='tight')
to_visualize.remove('raw')
if (not to_visualize and
exit_after_visualization):
return
for cnt in range(len(samples_indices)):
samples_indices[cnt] = samples_inds.copy()
readimagedata = True
if not self.frames_preproc:
self.frames_preproc = FramesPreprocessing(self.parameters)
else:
self.frames_preproc.reset()
if not self.descriptors[descriptor]:
self.descriptors[
descriptor] = self.available_descriptors[
descriptor](parameters=self.parameters,
datastreamer=self.frames_preproc,
viewer=(
FeatureVisualization(
offline_vis=offline_vis,
n_frames=len(imgs)) if
to_visualize else None))
else:
self.descriptors[descriptor].reset()
features[count] = []
median_depth[count] = []
valid = []
for img_count, img in enumerate(imgs):
'''
#DEBUGGING
cv2.imshow('t', (img%256).astype(np.uint8))
cv2.waitKey(30)
'''
check = self.frames_preproc.update(img,
sync[img_count],
mask=masks[img_count],
angle=angles[img_count],
center=centers[img_count])
if 'features' in to_visualize:
self.descriptors[descriptor].set_curr_frame(img_count)
if check:
extracted_features = self.descriptors[descriptor].extract()
if extracted_features is not None:
features[count].append(extracted_features)
median_depth[count].append(np.median(self.frames_preproc.curr_patch))
else:
features[count].append(None)
median_depth[count].append(None)
if 'features' in to_visualize:
self.descriptors[descriptor].visualize()
self.descriptors[descriptor].draw()
if (len(to_visualize) == 1 and
exit_after_visualization):
continue
else:
if (len(to_visualize) == 1
and exit_after_visualization):
self.descriptors[descriptor].draw()
continue
features[count].append(None)
median_depth[count].append(None)
if 'Features' not in times:
times['Features'] = []
times['Features'] += self.descriptors[descriptor].time
if self.preproc_time is None:
self.preproc_time = []
self.preproc_time+=self.frames_preproc.time
loaded_ids[nloaded_id] = nloaded_file_id
co.file_oper.save_labeled_data([nloaded_id]
+loaded_ids[nloaded_id]+
[self.name],
[np.array(features[count]),
(sync,
samples_indices[count]),
median_depth[count],
times])
elif nloaded_id == 'Sparse Features':
if features[count] is None:
[features[count],
(sync,
samples_indices[count]),
median_depth[count],
times] = co.file_oper.load_labeled_data(
[ids[ids.index(nloaded_id)-1]]+
loaded_ids[ids[ids.index(nloaded_id)-1]]+[self.name])
if self.coders[count] is None:
self.load_sparse_coder(count)
features[count] = self.coders[
count].multicode(features[count])
if 'Sparse Features' not in times:
times['Sparse Features'] = []
times['Sparse Features'] += self.coders[
count].time
loaded_ids[nloaded_id] = nloaded_file_id
co.file_oper.save_labeled_data([nloaded_id] +
loaded_ids[nloaded_id]+
[self.name],
[np.array(features[count]),
(sync,
samples_indices[count]),
median_depth[count],
times])
elif nloaded_id == 'Buffered Features':
if features[count] is None or samples_indices[count] is None:
[features[
count],
(sync,
samples_indices[count]),
median_depth[count],
times] = co.file_oper.load_labeled_data(
[ids[ids.index(nloaded_id) -1]] +
loaded_ids[
ids[ids.index(nloaded_id) - 1]] +
[self.name])
self.buffer_class[count].reset()
new_samples_indices = []
for sample_count in range(len(features[count])):
self.buffer_class[count].update_buffer_info(
sync[sample_count],
samples_indices[count][sample_count],
samples = features[count][sample_count],
depth=median_depth[count][sample_count])
self.buffer_class[count].add_buffer()
features[count],samples_indices[count],median_depth[count] = self.buffer_class[count].extract_buffer_list()
loaded_ids[nloaded_id] = nloaded_file_id
co.file_oper.save_labeled_data([nloaded_id]+loaded_ids[nloaded_id]
+[self.name],
[np.array(features[count]),
samples_indices[count],
median_depth[count],
times])
elif nloaded_id == 'Sparse Buffers':
if features[count] is None:
[features[count],
samples_indices[count],
median_depth[count],
times] = co.file_oper.load_labeled_data(
['Buffered Features']+loaded_ids['Buffered Features']
+[self.name])
if self.coders[count] is None:
self.load_sparse_coder(count)
features[count] = self.coders[count].multicode(features[count])
if 'Sparse Buffer' not in times:
times['Sparse Buffer'] = []
times['Sparse Buffer'] += self.coders[
count].time
loaded_ids[nloaded_id] = nloaded_file_id
co.file_oper.save_labeled_data([nloaded_id] +
loaded_ids[nloaded_id]
+[self.name],
[np.array(features[count]),
samples_indices[count],
median_depth[count],
times])
elif nloaded_id == 'PTPCA':
if features[count] is None:
[features[count],
samples_indices[count],
median_depth[count],
times] = co.file_oper.load_labeled_data(
[ids[ids.index('PTPCA')-1]] +
loaded_ids[ids[ids.index('PTPCA') - 1]]
+[self.name])
self.buffer_class[count].reset()
features[count] = [
self.buffer_class[count].perform_post_time_pca(
_buffer) for _buffer in features[count]]
if 'PTPCA' not in times:
times['PTPCA'] = []
times['PTPCA'] += self.buffer_class[
count].time
loaded_ids[nloaded_id] = nloaded_file_id
co.file_oper.save_labeled_data([nloaded_id] +
loaded_ids[nloaded_id]+
[self.name],
[np.array(
features[count]),
samples_indices[count],
median_depth[count],
times])
if features[count] is None:
try:
[features[count],
samples_indices[count],
median_depth[count],
times] = loaded_data
if isinstance(samples_indices[count], tuple):
samples_indices[count] = samples_indices[count][-1]
except TypeError:
pass
self.descriptors_id[count] = loaded_ids[ids[-1]]
if (self.parameters['sparsecoded'] and not self.coders[count]):
finite_features = []
for feat in features[count]:
if feat is not None:
finite_features.append(feat)
if self.all_data[count] is None:
self.all_data[count] = np.array(finite_features)
else:
self.all_data[count] = np.concatenate((self.all_data[count],
finite_features),axis=0)
try:
if np.unique([len(feat) for feat in features]).size == 1:
valid = True
redo = False
else:
self.logger.warning('Unequal samples dimension of loaded features:'
+ str([len(feat) for feat in features])
+' ...repeating')
redo = True
except Exception as e:
for count,feat in enumerate(features):
if feat is None:
print 'Features[' + str(count) + '] is None'
self.logger.warning(str(e))
redo = True
pass
return (features,
samples_indices[np.argmax([len(sample) for sample in
samples_indices])],
median_depth[np.argmax([len(median_depth) for
median_depth in
samples_indices])],
self.name, self.coders, self.descriptors_id)
def save(self, save_path=None):
'''
Save actions to file
'''
if save_path is None:
actions_path = self.save_path
else:
actions_path = save_path
self.logger.info('Saving actions to ' + actions_path)
with open(actions_path, 'wb') as output:
pickle.dump(self.actions, output, -1)
class ActionsSparseCoding(object):
'''
Class to hold sparse coding coders
'''
def __init__(self, parameters):
self.features = parameters['descriptors']
self.logger = logging.getLogger(self.__class__.__name__)
initialize_logger(self.logger)
self.parameters = parameters
self.sparse_dim_rat = []
try:
for feat in self.features:
self.sparse_dim_rat.append(parameters['sparse_params'][feat])
except (KeyError, TypeError):
self.sparse_dim_rat = [None] * len(self.features)
self.sparse_coders = []
self.codebooks = []
self.initialized = True
self.save_path = (os.getcwd() +
os.sep + 'saved_coders.pkl')
def train(self, data, feat_count, display=0, min_iterations=10,
init_traindata_num=200, incr_rate=2, sp_opt_max_iter=200,
debug=False, save_traindata=True):
'''
feat_count: features position inside
actions.actions[act_num].features list
'''
try:
self.sparse_coders[feat_count].display = display
except:
self.sparse_coders[feat_count] = sc.SparseCoding(
sparse_dim_rat=self.sparse_dim_rat[feat_count],
name=str(feat_count))
self.sparse_coders[feat_count].display = display
self.logger.info('Training Dictionaries using data of shape:'
+ str(data.shape))
if save_traindata:
savepath = ('SparseTraining-' +
self.parameters['descriptors'][
feat_count] + '.npy')
self.logger.info('TrainData is saved to ' + savepath)
np.save(savepath, data, allow_pickle=False)
self.sparse_coders[feat_count].train_sparse_dictionary(data,
init_traindata_num=init_traindata_num,
incr_rate=incr_rate,
sp_opt_max_iter=sp_opt_max_iter,
min_iterations=min_iterations,
n_jobs=4)
self.codebooks[feat_count] = (
pinv(self.sparse_coders[feat_count].codebook_comps))
return 1
def initialize(self):
'''
initialize / reset all codebooks that refer to the given <sparse_dim_rat>
and feature combination
'''
self.sparse_coders = []
for count, feature in enumerate(self.features):
self.sparse_coders.append(sc.SparseCoding(
sparse_dim_rat=self.sparse_dim_rat[count],
name=str(count)))
self.codebooks.append(None)
self.initialized = True
def flush(self, feat_count='all'):
'''
Reinitialize all or one dictionary
'''
if feat_count == 'all':
iter_quant = self.sparse_coders
iter_range = range(len(self.features))
else:
iter_quant = [self.sparse_coders[feat_count]]
iter_range = [feat_count]
feat_dims = []
for feat_count, inv_dict in zip(iter_range, iter_quant):
feat_dims[feat_count] = None
try:
feat_dim = inv_dict.codebook_comps.shape[0]
feat_dims[feat_count] = feat_dim
except AttributeError:
feat_dims[feat_count] = None
for feature in self.sparse_coders:
if feat_dims[feature] is not None:
self.sparse_coders[feat_count].flush_variables()
self.sparse_coders[feat_count].initialize(feat_dims[feature])
def save(self, save_dict=None, save_path=None):
'''
Save coders to file
'''
if save_dict is not None:
for feat_count, feature in enumerate(self.features):
if not self.parameters['PTPCA']:
save_dict[feature + ' ' +
str(self.sparse_dim_rat[feat_count])] = \
self.sparse_coders[feat_count]
else:
save_dict[feature + ' ' +
str(self.sparse_dim_rat[feat_count]) +
' PCA ' +
str(self.parameters['PTPCA_params'][
'PTPCA_components'])] = \
self.sparse_coders[feat_count]
return
if save_path is None:
coders_path = self.save_path
else:
coders_path = save_path
self.logger.info('Saving Dictionaries to ' + coders_path)
with open(coders_path, 'wb') as output:
pickle.dump((self.sparse_coders, self.codebooks), output, -1)
def grad_angles(patch):
'''
Compute gradient angles on image patch for GHOG
'''
y_size = 30
x_size = int(y_size/float(np.shape(patch)[0])
* np.shape(patch)[1])
patch = cv2.resize(patch, (x_size, y_size),
interpolation=cv2.INTER_NEAREST)
grady, gradx = np.gradient(patch)
ang = np.arctan2(grady, gradx)
#ang[ang < 0] = ang[ang < 0] + pi
return ang.ravel() # returns values 0 to pi
class FramesPreprocessing(object):
def __init__(self, parameters,reset_time=True):
self.logger = logging.getLogger(self.__class__.__name__)
initialize_logger(self.logger)
self.parameters = parameters
self.action_type = parameters['action_type']
self.skeleton = None
self.prev_patch = None
self.curr_patch = None
self.prev_patch_original = None
self.curr_patch_original = None
self.prev_roi_patch = None
self.curr_roi_patch = None
self.prev_roi_patch_original = None
self.curr_roi_patch_original = None
self.prev_patch_pos = None
self.curr_patch_pos = None
self.prev_patch_pos_original = None
self.curr_patch_pos_original = None
self.prev_count = 0
self.curr_count = 0
self.prev_depth_im = np.zeros(0)
self.curr_depth_im = np.zeros(0)
self.hand_contour = None
self.fig = None
self.kernel = np.ones((5, 5), np.uint8)
self.curr_full_depth_im = None
self.prev_full_depth_im = None
self.prev_cnt = None
self.curr_cnt = None
self.angle = None
self.center = None
self.hand_img = None
if reset_time:
self.time = []
def reset(self,reset_time=False):
self.__init__(self.parameters, reset_time=reset_time)
@timeit
def update(self, img, img_count, use_dexter=False, mask=None, angle=None,
center=None, masks_needed=False, isderotated=False):
'''
Update frames
'''
if use_dexter:
mask, hand_patch, hand_patch_pos = prepare_dexter_im(
img)
else:
cnt = None
#try:
if masks_needed and mask is None:
mask1 = cv2.morphologyEx(
img.copy(), cv2.MORPH_OPEN, self.kernel)
_, cnts, _ = cv2.findContours(mask1.astype(np.uint8),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
if not cnts:
img = None
else:
cnts_areas = [cv2.contourArea(cnts[i]) for i in
xrange(len(cnts))]
cnt = cnts[np.argmax(cnts_areas)]
if self.skeleton is None:
self.skeleton = hsa.FindArmSkeleton(img.copy())
skeleton_found = self.skeleton.run(img, cnt,
'longest_ray')
if skeleton_found:
mask = self.skeleton.hand_mask
last_link = (self.skeleton.skeleton[-1][1] -
self.skeleton.skeleton[-1][0])
angle = np.arctan2(
last_link[0], last_link[1])
center = self.skeleton.hand_start
else:
img = None
if img is not None and not isderotated and angle is None:
raise Exception('mask is not None, derotation is True ' +
'and angle and center are missing, ' +
'cannot proceed with this combination')
if self.action_type is not 'Passive':
if img is not None:
(self.prev_full_depth_im,
self.curr_full_depth_im) = (self.curr_full_depth_im,
img)
imgs = [self.prev_full_depth_im,
self.curr_full_depth_im]
#if self.prev_full_depth_im is None:
# return False
else:
imgs = [img]
curr_img = img
any_none = any([im is None for im in imgs])
if not any_none:
imgs = [im.copy() for im in imgs]
for im in imgs:
if not any_none:
if np.sum(mask * img > 0) == 0:
any_none = True
if not any_none:
im = im * (mask > 0)
if not isderotated:
if angle is not None and center is not None:
self.angle = angle
self.center = center
processed_img = co.pol_oper.derotate(
im,
angle, center)
else:
processed_img = im
else:
processed_img = None
self.hand_img = im
if processed_img is not None:
hand_patch, hand_patch_pos, self.hand_contour = prepare_im(
processed_img)
else:
hand_patch, hand_patch_pos, self.hand_contour = (None,
None,
None)
# DEBUGGING
# cv2.imshow('test',((hand_patch)%255).astype(np.uint8))
# cv2.waitKey(10)
#if hand_patch is None:
# return False
(self.prev_depth_im,
self.curr_depth_im) = (self.curr_depth_im,
processed_img)
(self.curr_count,
self.prev_count) = (img_count,
self.curr_count)
(self.prev_patch,
self.curr_patch) = (self.curr_patch,
hand_patch)
(self.prev_patch_pos,
self.curr_patch_pos) = (self.curr_patch_pos,
hand_patch_pos)
if not self.action_type == 'Passive':
if not any_none:
(hand_patch_original,
hand_patch_pos_original,
self.hand_contour_original) = prepare_im(
im)
else:
(hand_patch_original,
hand_patch_pos_original,
self.hand_contour_original) = (None, None, None)
(self.prev_patch_original,
self.curr_patch_original) = (self.curr_patch_original,
hand_patch_original)
(self.prev_patch_pos_original,
self.curr_patch_pos_original) = (
self.curr_patch_pos_original,
hand_patch_pos_original)
#except ValueError:
# return False
return not (any_none or self.curr_patch is None)
class Descriptor(object):
'''
<parameters>: dictionary with parameters
<datastreamer> : FramesPreprocessing Class
<viewer>: FeatureVisualization Class
'''
def __init__(self, parameters, datastreamer, viewer=None,
reset_time=True):
self.name = ''
self.features = None
self.roi = None
self.roi_original = None
self.parameters = parameters
self.plots = None
self.edges = None
self.ds = datastreamer
self.action_type = parameters['action_type']
if reset_time:
self.time = []
self.view = viewer
def reset(self, visualize=False, reset_time=False):
self.__init__(self.parameters, self.ds, visualize,
reset_time=reset_time)
def draw_flow(self, img, flow, step=16):
h, w = img.shape[:2]
y, x = np.mgrid[step / 2:h:step, step /
2:w:step].reshape(2, -1).astype(int)
fx, fy = flow[y, x].T
lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
return vis
def draw_hsv(self, flow):
h, w = flow.shape[:2]
fx, fy = flow[:, :, 0], flow[:, :, 1]
ang = np.arctan2(fy, fx) + np.pi
v = np.sqrt(fx * fx + fy * fy)
hsv = np.zeros((h, w, 3), np.uint8)
hsv[..., 0] = ang * (180 / np.pi / 2)
hsv[..., 1] = 255
hsv[..., 2] = np.minimum(v * 4, 255)
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return bgr
def convert_to_uint8(self, patch, _min=None, _max=None):
# We keep 0 value to denote pixels outside of mask and use the rest band
# for the other depth values
uint8 = np.zeros(patch.shape, np.uint8)
nnz_pixels_mask = patch > 0
nnz_pixels = patch[patch > 0]
uint8[nnz_pixels_mask] = ((nnz_pixels - _min) / float(
_max - _min) * 254 + 1).astype(np.uint8)
return uint8
def visualize_projection(self):
self.view.plot_3d_projection(self.roi,
self.ds.prev_roi_patch,
self.ds.curr_roi_patch)
def visualize_roi(self):
self.view.plot_2d_patches(self.ds.prev_roi_patch,
self.ds.curr_roi_patch)
def visualize(self):
self.view.plot(self.name.lower(), self.features, self.edges)
def draw(self):
self.view.draw()
def plot(self):
self.view.plot()
def set_curr_frame(self, frame):
self.view.set_curr_frame(frame)
def find_outliers(self, data, m=2.):
'''
median of data must not be 0
'''
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d / mdev if mdev > 0 else 0
return s > m
def find_roi(self, prev_patch, curr_patch, prev_patch_pos, curr_patch_pos):
'''
Find unified ROI, concerning 2 consecutive frames
'''
if prev_patch is None:
prev_patch = curr_patch
prev_patch_pos = curr_patch_pos
roi = np.array([[
min(prev_patch_pos[0], curr_patch_pos[0]),
max((prev_patch.shape[0] + prev_patch_pos[0],
curr_patch.shape[0] + curr_patch_pos[0]))],
[min(prev_patch_pos[1], curr_patch_pos[1]),
max(prev_patch.shape[1] + prev_patch_pos[1],
curr_patch.shape[1] + curr_patch_pos[1])]])
return roi
def extract(self):
pass
class Descriptor3DHOF(Descriptor):
def __init__(self, *args, **kwargs):
Descriptor.__init__(self, *args, **kwargs)
self.name = '3dhof'
self.logger = logging.getLogger(self.__class__.__name__)
initialize_logger(self.logger)
self.bin_size = co.CONST['3DHOF_bin_size']
self.hist = SpaceHistogram()
def compute_scene_flow(self):
'''
Computes scene flow for 3DHOF
'''
if self.ds.prev_depth_im is None or self.ds.curr_depth_im is None:
return None
roi = self.roi
prev_depth_im = self.ds.prev_depth_im
curr_depth_im = self.ds.curr_depth_im
self.prev_roi_patch = prev_depth_im[roi[0, 0]:roi[0, 1],
roi[1, 0]:roi[1, 1]].astype(float)
self.curr_roi_patch = curr_depth_im[roi[0, 0]:roi[0, 1],
roi[1, 0]:roi[1, 1]].astype(float)
curr_z = self.curr_roi_patch
prev_z = self.prev_roi_patch
# DEBUGGING
# cv2.imshow('curr_roi_patch',(self.curr_roi_patch_original).astype(np.uint8))
# cv2.waitKey(10)
prev_nnz_mask = self.prev_roi_patch > 0
curr_nnz_mask = self.curr_roi_patch > 0
nonzero_mask = prev_nnz_mask * curr_nnz_mask
if np.sum(nonzero_mask) == 0:
return None
_max = max(np.max(self.prev_roi_patch[prev_nnz_mask]),
np.max(self.curr_roi_patch[curr_nnz_mask]))
_min = min(np.min(self.prev_roi_patch[prev_nnz_mask]),
np.min(self.curr_roi_patch[curr_nnz_mask]))
prev_uint8 = self.convert_to_uint8(self.prev_roi_patch,
_min=_min, _max=_max)
curr_uint8 = self.convert_to_uint8(self.curr_roi_patch,
_min=_min, _max=_max)
flow = cv2.calcOpticalFlowFarneback(prev_uint8,
curr_uint8, None,
0.3, 3, 40,
3, 7, 1.5, 0)
# DEBUGGING
'''
cv2.imshow('flow HSV', self.draw_hsv(flow))
cv2.imshow('prev', prev_uint8)
cv2.imshow('flow', self.draw_flow(curr_uint8, flow, step = 14))
cv2.waitKey(500)
'''
y_old, x_old = np.mgrid[:self.prev_roi_patch.shape[0],
:self.prev_roi_patch.shape[1]].reshape(
2, -1).astype(int)
mask = prev_z[y_old, x_old] > 0
y_old = y_old[mask.ravel()]
x_old = x_old[mask.ravel()]
fx, fy = flow[y_old, x_old].T
y_new, x_new = ((y_old + fy).astype(int), (x_old + fx).astype(int))
y_new = np.minimum(curr_z.shape[0] - 1, y_new)
y_new = np.maximum(0, y_new)
x_new = np.minimum(curr_z.shape[1] - 1, x_new)
x_new = np.maximum(0, x_new)
mask = (self.find_outliers(curr_z[y_new, x_new], 5)
+ self.find_outliers(prev_z[y_old, x_old], 5)) == 0
if np.size(mask)<10:
return None
y_new = y_new[mask]
y_old = y_old[mask]
x_new = x_new[mask]
x_old = x_old[mask]
princ_coeff = co.pol_oper.derotate_points(
self.ds.curr_depth_im,
np.array([PRIM_Y - self.roi_original[0, 0],
PRIM_X - self.roi_original[0, 1]]),
self.ds.angle,
self.ds.center)
y_true_old = ((y_old - princ_coeff[0]) *
prev_z[y_old,
x_old] / float(FLNT))
x_true_old = ((x_old - princ_coeff[1]) *
prev_z[y_old,
x_old] / float(FLNT))
y_true_new = ((y_new - princ_coeff[0]) *
curr_z[y_new,
x_new] / float(FLNT))
x_true_new = ((x_new - princ_coeff[1]) *
curr_z[y_new,
x_new] / float(FLNT))
# DEBUGGING
#cv2.imshow('test', (self.curr_roi_patch).astype(np.uint8))
# cv2.waitKey(10)
dx = x_true_new - x_true_old
dy = y_true_new - y_true_old
dz = curr_z[y_new, x_new] - prev_z[y_old, x_old]
return np.concatenate((dx.reshape(-1, 1),
dy.reshape(-1, 1),
dz.reshape(-1, 1)), axis=1)
@timeit
def extract(self,bin_size=None):
'''
Compute 3DHOF features
'''
self.roi = self.find_roi(self.ds.prev_patch, self.ds.curr_patch,
self.ds.prev_patch_pos, self.ds.curr_patch_pos)
self.roi_original = self.find_roi(
self.ds.prev_patch_original, self.ds.curr_patch_original,
self.ds.prev_patch_pos_original,
self.ds.curr_patch_pos_original)
if bin_size is None:
self.hist.bin_size = self.bin_size
self.hist.range = [[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]]
disp = self.compute_scene_flow()
if disp is None:
return None
disp_norm = np.sqrt((disp[:, 0] * disp[:, 0] + disp[:, 1] *
disp[:, 1] + disp[:, 2] * disp[:, 2]))[:, None]
disp_norm[disp_norm == 0] = 1
disp = disp / disp_norm.astype(float)
hist, edges = self.hist.hist_data(disp)
self.edges = edges
self.features = hist / float(np.sum(hist))
self.features = self.features.ravel()
return self.features
class DescriptorZHOF(Descriptor):
def __init__(self, *args, **kwargs):
Descriptor.__init__(self, *args, **kwargs)
self.name = 'zhof'
self.logger = logging.getLogger(self.__class__.__name__)
initialize_logger(self.logger)
self.bin_size = co.CONST['ZHOF_bin_size']
self.hist = SpaceHistogram()
def z_flow(self, prev_depth_im, curr_depth_im):
'''
Computes vertical displacement to the camera, using static frame
xy-coordinates and changing z ones.
'''
roi = self.roi
self.prev_roi_patch = prev_depth_im[
roi[0, 0]:roi[0, 1],
roi[1, 0]:roi[1, 1]].astype(float)
self.curr_roi_patch = curr_depth_im[
roi[0, 0]:roi[0, 1],
roi[1, 0]:roi[1, 1]].astype(float)
'''
y_size = 30
resize_rat =y_size/float(np.shape(self.prev_roi_patch)[0])
x_size = int(resize_rat * np.shape(self.prev_roi_patch)[1])
self.prev_roi_patch = cv2.resize(self.prev_roi_patch, (x_size, y_size),
interpolation=cv2.INTER_NEAREST)
self.curr_roi_patch = cv2.resize(self.curr_roi_patch, (x_size, y_size),
interpolation=cv2.INTER_NEAREST)
'''
resize_rat = 1
nonzero_mask = (self.prev_roi_patch * self.curr_roi_patch) > 0
if np.sum(nonzero_mask) == 0:
return None
'''
#DEBUGGING
cv2.imshow('test_prev',(self.prev_roi_patch%255).astype(np.uint8))
cv2.imshow('test_curr', (self.curr_roi_patch%255).astype(np.uint8))
cv2.waitKey(30)
'''
try:
yx_coords = (find_nonzero(
nonzero_mask.astype(np.uint8)).astype(float)/resize_rat
-
np.array([[PRIM_Y - self.roi[0, 0],
PRIM_X - self.roi[1, 0]]]))
except ValueError:
return None
prev_z_coords = self.prev_roi_patch[nonzero_mask][:,
None].astype(float)
curr_z_coords = self.curr_roi_patch[nonzero_mask][:,
None].astype(float)
dz_coords = (curr_z_coords - prev_z_coords).astype(float)
# invariance to environment height variance:
dz_outliers = self.find_outliers(dz_coords, 3.).ravel()
dz_coords = dz_coords[dz_outliers == 0]
yx_coords = yx_coords[dz_outliers == 0, :]
yx_coords_in_space = (yx_coords * dz_coords / FLNT)
return np.concatenate((yx_coords_in_space,
dz_coords), axis=1)
@timeit
def extract(self, bin_size=None):
'''
Compute ZHOF features
'''
'''
#DEBUGGING
if self.ds.prev_patch_pos is not None:
print 'extract',self.ds.prev_patch.shape, self.ds.curr_patch.shape
'''
if self.ds.prev_patch is None or self.ds.curr_patch is None:
return None
'''
#DEBUGGING
print self.ds.prev_patch_pos, self.ds.curr_patch_pos
exit()
'''
if self.ds.curr_count - self.ds.prev_count > co.CONST[
'min_frame_count_diff']:
return None
self.roi = self.find_roi(self.ds.prev_patch, self.ds.curr_patch,
self.ds.prev_patch_pos, self.ds.curr_patch_pos)
self.roi_original = self.find_roi(
self.ds.prev_patch_original, self.ds.curr_patch_original,
self.ds.prev_patch_pos_original,
self.ds.curr_patch_pos_original)
'''
#DEBUGGING
print self.roi
'''
if bin_size is None:
self.hist.bin_size = self.bin_size
else:
self.hist.bin_size = self.bin_size
self.hist.range = [[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]]
disp = self.z_flow(self.ds.prev_depth_im, self.ds.curr_depth_im)
if disp is None:
return None
# print disp.max(axis=0), disp.min(axis=0)
disp_norm = np.sqrt((disp[:, 0] * disp[:, 0] + disp[:, 1] *
disp[:, 1] + disp[:, 2] * disp[:, 2]))[:, None]
disp_norm[disp_norm == 0] = 1
disp = disp / disp_norm.astype(float)
# print np.unique(np.around(disp,1))
hist, edges = self.hist.hist_data(disp)
self.edges = edges
features = hist / float(np.sum(hist))
features = features.ravel()
return features
class DescriptorGHOG(Descriptor):
def __init__(self, *args, **kwargs):
Descriptor.__init__(self, *args, **kwargs)
self.name = 'ghog'
self.logger = logging.getLogger(self.__class__.__name__)
initialize_logger(self.logger)
self.bin_size = co.CONST['GHOG_bin_size']
self.hist = SpaceHistogram()
@timeit
def extract(self, bin_size=None):
'''
Compute GHOG features
'''
im_patch = self.ds.curr_patch.astype(int)
if bin_size is None:
self.hist.bin_size = self.bin_size
else:
self.hist.bin_size = bin_size
# DEBUGGING: added -pi (check grad_angles too)
self.hist.range = [[-pi, pi]]
gradients = grad_angles(im_patch)
hist, edges = self.hist.hist_data(gradients)
self.edges = edges
#hist[0] = max(0, hist[0] - np.sum(im_patch==0))
hist = hist / float(np.sum(hist))
return hist
class Descriptor3DXYPCA(Descriptor):
def __init__(self, *args, **kwargs):
Descriptor.__init__(self, *args, **kwargs)
self.logger = logging.getLogger(self.__class__.__name__)
self.name = '3dxypca'
initialize_logger(self.logger)
self.pca_resize_size = co.CONST['3DXYPCA_size']
self.edges = [['X' + str(cnt) for cnt in range(self.pca_resize_size)]+
['Y' + str(cnt) for cnt in range(self.pca_resize_size)]]
@timeit
def extract(self, resize_size=None):
'''
Compute 3DXYPCA features
'''
if resize_size is not None:
self.pca_resize_size = resize_size
_, pca_along_2 = cv2.PCACompute(
cv2.findNonZero(self.ds.curr_patch.astype(np.uint8)).squeeze().
astype(float),
np.array([]), maxComponents=1)
rot_angle = np.arctan2(pca_along_2[0][1], pca_along_2[0][0])
patch = co.pol_oper.derotate(self.ds.curr_patch, rot_angle,
(self.ds.curr_patch.shape[0] / 2,
self.ds.curr_patch.shape[1] / 2))
# DEBUGGING
# cv2.imshow('test',patch.astype(np.uint8))
# cv2.waitKey(10)
patch_res = cv2.resize(patch, (self.pca_resize_size,
self.pca_resize_size),
interpolation=cv2.INTER_NEAREST)
patch_res_mask = patch_res == 0
masked_array = np.ma.array(patch_res, mask=patch_res_mask)
masked_mean_0 = np.ma.mean(masked_array, axis=0)
masked_mean_1 = np.ma.mean(masked_array, axis=1)
cor_patch_res_0 = patch_res.copy()
cor_patch_res_1 = patch_res.copy()
cor_patch_res_0[patch_res_mask] = np.tile(masked_mean_0[None,
:], (patch_res.shape[0], 1))[
patch_res_mask]
cor_patch_res_1[patch_res_mask] = np.tile(masked_mean_1[:, None], (
1, patch_res.shape[1]))[
patch_res_mask]
_, pca_along_0 = cv2.PCACompute(
cor_patch_res_0, np.array(
[]), maxComponents=1)
_, pca_along_1 = cv2.PCACompute(cor_patch_res_1.T, np.array([]),
maxComponents=1)
features = np.concatenate((pca_along_0[0], pca_along_1[0]), axis=0)
return features
class FeatureVisualization(object):
def __init__(self, offline_vis=False, n_frames=None,
n_saved=4, init_frames=50):
import matplotlib.pyplot as plt
self.logger = logging.getLogger(self.__class__.__name__)
if offline_vis:
self.n_to_plot = n_frames / n_saved
else:
self.n_to_plot = None
self.offline_vis = offline_vis
self.n_frames = n_frames
self.init_frames = init_frames
gs = gridspec.GridSpec(120, 100)
initialize_logger(self.logger)
if not self.offline_vis:
plt.ion()
self.fig = plt.figure()
self.patches3d_plot = self.fig.add_subplot(
gs[:50, 60:100], projection='3d')
self.patches2d_plot = self.fig.add_subplot(gs[:50, :50])
self.hist4d = h4d.Hist4D()
self.hof_plots = (self.fig.add_subplot(gs[60:100 - 5, :45], projection='3d'),
self.fig.add_subplot(gs[60:100 - 5, 45:50]),
self.fig.add_subplot(gs[100 - 4:100 - 2, :50]),
self.fig.add_subplot(gs[100 - 2:100, :50]))
self.plotted_hof = False
self.pause_key = Button(
self.fig.add_subplot(gs[110:120, 25:75]), 'Next')
self.pause_key.on_clicked(self.unpause)
self.hog_plot = self.fig.add_subplot(gs[70:100, 70:100])
plt.show()
else:
self.fig = plt.figure()
self.hog_fig = plt.figure()
self.patches3d_fig = plt.figure()
self.patches2d_fig = plt.figure()
self.xypca_fig = plt.figure()
self.patches3d_plot = self.patches2d_fig.add_subplot(111)
self.patches2d_plot = self.patches3d_fig.add_subplot(111)
self.xypca_plot = self.xypca_fig.add_subplot(111)
self.hog_plot = self.hog_fig.add_subplot(111)
self.hof_plots = (self.fig.add_subplot(gs[60:100 - 5, :45], projection='3d'),
self.fig.add_subplot(gs[60:100 - 5, 45:50]),
self.fig.add_subplot(gs[100 - 4:100 - 2, :50]),
self.fig.add_subplot(gs[100 - 2:100, :50]))
self.patches3d_fig.tight_layout()
self.patches2d_fig.tight_layout()
self.fig.tight_layout()
self.hog_fig.tight_layout()
self.xypca_fig.tight_layout()
self.hogs = []
self.patches2d = []
self.patches3d = []
self.xypca = []
self.hof = None
self.curr_frame = None
def to_plot(self):
if not self.offline_vis or self.curr_frame is None:
return True
return not ((
self.curr_frame - self.init_frames) % self.n_to_plot)
def set_curr_frame(self, num):
self.curr_frame = num
def plot(self, name, features, edges):
if 'hog' in name:
self.plot_hog(features, edges)
elif 'hof' in name:
self.plot_hof(features, edges)
elif 'pca' in name:
self.plot_xypca(features, edges)
def plot_hog(self, ghog_features, ghog_edges):
if self.to_plot():
hog_hist = ghog_features
hog_bins = ghog_edges
width = 0.7 * (hog_bins[0][1] - hog_bins[0][0])
center = (hog_bins[0][:-1] + hog_bins[0][1:]) / 2
self.hog_plot.clear()
self.hog_plot.bar(center, hog_hist, align='center', width=width)
def plot_hof(self, hof_features, hof_edges):
if self.to_plot():
if not self.plotted_hof:
if self.offline_vis:
self.plotted_hof = True
self.hist4d.draw(
hof_features,
hof_edges,
fig=self.fig,
all_axes=self.hof_plots)
self.hof = self.convert_plot2array(self.fig)
def plot_xypca(self, pca_features, xticklabels):
if self.to_plot():
width = 0.35
ind = np.arange(len(xticklabels))
self.xypca_plot.clear()
self.xypca_plot.set_xticks(ind + width / 2)
self.xypca_plot.set_xticklabels(xticklabels)
self.xypca.append(self.convert_plot2array(self.xypca_fig))
def plot_3d_projection(self, roi, prev_roi_patch, curr_roi_patch):
if self.to_plot():
nonzero_mask = (prev_roi_patch * curr_roi_patch) > 0
yx_coords = (find_nonzero(nonzero_mask.astype(np.uint8)).astype(float) -
np.array([[PRIM_Y - roi[0, 0],
PRIM_X - roi[1, 0]]]))
prev_z_coords = prev_roi_patch[nonzero_mask][:,
None].astype(float)
curr_z_coords = curr_roi_patch[nonzero_mask][:,
None].astype(float)
prev_yx_proj = yx_coords * prev_z_coords / (FLNT)
curr_yx_proj = yx_coords * curr_z_coords / (FLNT)
prev_yx_proj = prev_yx_proj[prev_z_coords.ravel() != 0]
curr_yx_proj = curr_yx_proj[curr_z_coords.ravel() != 0]
self.patches3d_plot.clear()
self.patches3d_plot.scatter(prev_yx_proj[:, 1], prev_yx_proj[:, 0],
prev_z_coords[prev_z_coords != 0],
zdir='z', s=4, c='r', depthshade=False, alpha=0.5)
self.patches3d_plot.scatter(curr_yx_proj[:, 1], curr_yx_proj[:, 0],
curr_z_coords[curr_z_coords != 0],
zdir='z', s=4, c='g', depthshade=False, alpha=0.5)
if self.offline_vis:
self.patches3d.append(self.convert_plot2array(self.patches3d_fig))
'''
zprevmin,zprevmax=self.patches3d_plot.get_zlim()
yprevmin,yprevmax=self.patches3d_plot.get_ylim()
xprevmin,xprevmax=self.patches3d_plot.get_xlim()
minlim=min(xprevmin,yprevmin,zprevmin)
maxlim=max(xprevmax,yprevmax,zprevmax)
self.patches3d_plot.set_zlim([minlim,maxlim])
self.patches3d_plot.set_xlim([minlim,maxlim])
self.patches3d_plot.set_ylim([minlim,maxlim])
'''
def convert_plot2array(self,fig):
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
def plot_3d_patches(self, roi, prev_roi_patch, curr_roi_patch):
if self.to_plot:
self.patches3d_plot.clear()
x_range = np.arange(roi[0, 0], roi[0, 1])
y_range = np.arange(roi[1, 0], roi[1, 1])
xmesh, ymesh = np.meshgrid(y_range, x_range)
xmesh = xmesh.ravel()
ymesh = ymesh.ravel()
curr_vals = curr_roi_patch.ravel()
self.patches3d_plot.scatter(xmesh[curr_vals > 0],
ymesh[curr_vals > 0],
zs=curr_vals[curr_vals > 0],
zdir='z',
s=4,
c='r',
depthshade=False,
alpha=0.5)
prev_vals = prev_roi_patch.ravel()
self.patches3d_plot.scatter(xmesh[prev_vals > 0],
ymesh[prev_vals > 0],
zs=prev_vals[prev_vals > 0],
zdir='z',
s=4,
c='g',
depthshade=False,
alpha=0.5)
if self.offline_vis:
self.patches3d.append(self.convert_plot2array(self.patches3d_fig))
def plot_2d_patches(self, prev_roi_patch, curr_roi_patch):
self.patches2d_plot.clear()
self.patches2d_plot.imshow(prev_roi_patch, cmap='Reds', alpha=0.5)
self.patches2d_plot.imshow(curr_roi_patch, cmap='Greens', alpha=0.5)
if self.offline_vis:
self.patches2d.append(self.convert_plot2array(self.patches2d_fig))
def _draw_single(self, fig):
import time
if not self.offline_vis:
fig.canvas.draw()
try:
fig.canvas.start_event_loop(30)
except:
time.sleep(1)
def draw(self):
if not self.offline_vis:
self._draw_single(self.fig)
else:
if self.to_plot():
self._draw_single(self.hog_fig)
self._draw_single(self.fig)
self._draw_single(self.patches3d_fig)
self._draw_single(self.patches2d_fig)
if self.curr_frame == self.n_frames - 1:
import pickle
with open('visualized_features','w') as out:
pickle.dump((self.hof,self.hogs,
self.patches2d,
self.patches3d), out)
tmp_fig = plt.figure()
tmp_axes = tmp_fig.add_subplot(111)
if self.hogs:
hogs_im = co.draw_oper.create_montage(
self.hogs, draw_num=False)
tmp_axes.imshow(hogs_im[:,:,:3])
plt.axis('off')
tmp_fig.savefig('ghog.pdf', bbox_inches='tight')
if self.hof is not None:
tmp_axes.imshow(self.hof[:,:,:3])
plt.axis('off')
tmp_fig.savefig('3dhof.pdf', bbox_inches='tight')
if self.patches2d:
patches2d_im = co.draw_oper.create_montage(
self.patches2d, draw_num=False)
tmp_axes.imshow(patches2d_im[:,:,:3])
plt.axis('off')
tmp_fig.savefig('patches2d.pdf', bbox_inches='tight')
if self.patches3d:
patches3d_im = co.draw_oper.create_montage(
self.patches3d, draw_num=False)
tmp_axes.imshow(patches3d_im[:,:,:3])
plt.axis('off')
tmp_fig.savefig('patches3d.pdf', bbox_inches='tight')
if self.xypca:
tmp_axes.imshow(self.hof[:,:,:3])
plt.axis('off')
tmp_fig.savefig('3dxypca.pdf', bbox_inches='tight')
def unpause(self, val):
plt.gcf().canvas.stop_event_loop()
class ActionRecognition(object):
'''
Class to hold everything about action recognition
<parameters> must be a dictionary.
'''
def __init__(self, parameters, coders=None,
feat_filename=None, log_lev='INFO'):
self.parameters = parameters
self.sparse_helper = ActionsSparseCoding(parameters)
self.sparse_helper.sparse_coders = coders
self.dict_names = self.sparse_helper.features
self.actions = Actions(parameters,
coders=self.sparse_helper.sparse_coders,
feat_filename=feat_filename)
self.log_lev = log_lev
self.logger = logging.getLogger('ActionRecognition')
initialize_logger(self.logger)
self.logger.setLevel(log_lev)
# self.logger.setLevel('SAVE_LOAD_FEATURES')
def add_action(self, *args, **kwargs):
'''
actions.add_action alias
'''
res = self.actions.add_action(*args, **kwargs)
return res
def train_sparse_coders(self,
use_dexter=False,
trained_coders_list=None,
coders_to_train=None,
codebooks_dict=None,
coders_savepath=None,
min_iterations=10,
incr_rate=2,
sp_opt_max_iter=200,
init_traindata_num=200,
save=True,
debug=False,
save_traindata=True):
'''
Add Dexter 1 TOF Dataset or depthdata + binarymaskdata and
set use_dexter to False (directory with .png or list accepted)
Inputs:
act_num: action number to use for training
use_dexter: true if Dexter 1 dataset is used
iterations: training iterations
save: save sparse coders after training
save_traindata: save training data used for sparsecoding
'''
if len(self.actions.actions) == 0:
raise Exception('Run add_action first and then call ' +
'train_sparse_coders')
feat_num = len(self.parameters['descriptors'])
# Train coders
self.sparse_helper.initialize()
for ind, coder in enumerate(trained_coders_list):
self.sparse_helper.sparse_coders[ind] = coder
all_sparse_coders = codebooks_dict
all_data = [self.actions.actions[ind].retrieve_features(concat=False) for
ind in range(len(self.actions.actions))]
'''
all_data is a list of actions. Each action is a list of descriptors.
Each descriptor is a 3d numpy array of samples-buffers with shape =
(samples number, buffer size, features size)
'''
for count, feat_name in enumerate(self.parameters['descriptors']):
if count in coders_to_train:
if self.parameters['PTPCA']:
self.logger.info('Using PCA with ' + str(
self.parameters['PTPCA_params']['PTPCA_components']) +
' components')
data = [
all_data[ind][count].reshape(
all_data[ind][count].shape[0], -1)
for ind in
range(len(self.actions.actions))]
for ind, d in enumerate(data):
self.logger.info('Descriptor of ' + feat_name + ' for action \'' +
str(self.actions.actions[ind].name) +
'\' has shape ' + str(d.shape))
data = np.concatenate(
data, axis=0)
frames_num = data.shape[0]
self.logger.info('Frames number: ' + str(frames_num))
self.logger.info('Creating coder for ' + feat_name)
self.sparse_helper.train(data[np.prod(
np.isfinite(data), axis=1).astype(bool),
:],
count,
display=1,
init_traindata_num=init_traindata_num,
incr_rate=incr_rate,
sp_opt_max_iter=sp_opt_max_iter,
min_iterations=min_iterations,
debug=debug,
save_traindata=save_traindata)
save_name = feat_name + ' ' + str(self.parameters['sparse_params'][
feat_name])
if self.parameters['PTPCA']:
comp = self.parameters['PTPCA_params'][
'PTPCA_components']
save_name += ' PCA ' + str(comp)
all_sparse_coders[save_name
] = self.sparse_helper.sparse_coders[
count]
if codebooks_dict is not None and save:
self.sparse_helper.save(save_dict=codebooks_dict)
if coders_savepath is not None:
self.logger.info('Saving ' +
str(self.parameters['descriptors'][count]) +
' coder..')
with open(coders_savepath, 'w') as output:
pickle.dump(all_sparse_coders, output, -1)
self.parameters['sparse_params']['trained_coders'] = True
return
| bsd-3-clause |