repo_name (stringlengths 6-96) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 762-753k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
ricardog/raster-project | get-sheets.py | 1 | 1293 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import os
import pandas as pd
def file_split(file):
s = file.split('.')
name = '.'.join(s[:-1]) # get directory name
return name
def getsheets(inputfile):
name = file_split(inputfile)
try:
os.makedirs(name)
except OSError:
pass
df1 = pd.ExcelFile(inputfile)
for x in df1.sheet_names:
print(x + '.xlsx', 'Done!')
df2 = pd.read_excel(inputfile, sheet_name=x)
filename = os.path.join(name, x.lower() + '.csv')
df2.to_csv(filename, index=False, encoding='utf-8')
print('\nAll Done!')
def get_sheet_names(inputfile):
df = pd.ExcelFile(inputfile)
for i, flavor in enumerate(df.sheet_names):
print('{0:>3}: {1}'.format(i + 1, flavor))
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('-n', '--sheet-names', is_flag=True)
@click.argument('inputfile')
def cli(sheet_names, inputfile):
'''Convert an Excel file with multiple sheets to several files with one sheet each.
Examples:
\b
getsheets filename
\b
getsheets -n filename
'''
if sheet_names:
get_sheet_names(inputfile)
else:
getsheets(inputfile)
cli()
| apache-2.0 |
gclenaghan/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
astocko/statsmodels | statsmodels/graphics/mosaicplot.py | 6 | 26886 | """Create a mosaic plot from a contingency table.
It allows one to visualize multivariate categorical data in a rigorous
and informative way.
See the docstring of the mosaic function for more information.
"""
# Author: Enrico Giampieri - 21 Jan 2013
from __future__ import division
from statsmodels.compat.python import (iteritems, iterkeys, lrange, string_types, lzip,
itervalues, zip, range)
import numpy as np
from statsmodels.compat.collections import OrderedDict
from itertools import product
from numpy import iterable, r_, cumsum, array
from statsmodels.graphics import utils
from pandas import DataFrame
__all__ = ["mosaic"]
def _normalize_split(proportion):
"""
return a list of proportions of the available space given the division;
if only a number is given, it will assume a split into two pieces
"""
if not iterable(proportion):
if proportion == 0:
proportion = array([0.0, 1.0])
elif proportion >= 1:
proportion = array([1.0, 0.0])
elif proportion < 0:
raise ValueError("proportions should be positive, "
"given value: {}".format(proportion))
else:
proportion = array([proportion, 1.0 - proportion])
proportion = np.asarray(proportion, dtype=float)
if np.any(proportion < 0):
raise ValueError("proportions should be positive, "
"given value: {}".format(proportion))
if np.allclose(proportion, 0):
raise ValueError("at least one proportion should be "
"greater than zero (given value: {})".format(proportion))
# ok, data are meaningful, so go on
if len(proportion) < 2:
return array([0.0, 1.0])
left = r_[0, cumsum(proportion)]
left /= left[-1] * 1.0
return left
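# Illustrative behaviour of _normalize_split (a hand-worked sketch, not part of
# the original module): the return value is the cumulative split positions.
#   _normalize_split(0.5)    -> array([0. , 0.5, 1. ])   # a scalar means a two-way split
#   _normalize_split([1, 3]) -> array([0.  , 0.25, 1.  ]) # segments in the ratio 1:3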
def _split_rect(x, y, width, height, proportion, horizontal=True, gap=0.05):
"""
Split the given rectangle into n segments whose proportions are specified
along the given axis. If a gap is inserted, they will be separated by a
certain amount of space, retaining the relative proportions between them.
A gap of 1 corresponds to a plot that is half void and the remaining half
space is proportionally divided among the pieces.
"""
x, y, w, h = float(x), float(y), float(width), float(height)
if (w < 0) or (h < 0):
raise ValueError("dimension of the square less than "
"zero w={} h={}".format(w, h))
proportions = _normalize_split(proportion)
# extract the starting point and the dimension of each subdivision
# in respect to the unit square
starting = proportions[:-1]
amplitude = proportions[1:] - starting
# how much each extrema is going to be displaced due to gaps
starting += gap * np.arange(len(proportions) - 1)
# how much the squares plus the gaps are extended
extension = starting[-1] + amplitude[-1] - starting[0]
# normalize everything for fit again in the original dimension
starting /= extension
amplitude /= extension
# bring everything to the original square
starting = (x if horizontal else y) + starting * (w if horizontal else h)
amplitude = amplitude * (w if horizontal else h)
# create each 4-tuple for each new block
results = [(s, y, a, h) if horizontal else (x, s, w, a)
for s, a in zip(starting, amplitude)]
return results
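# Illustrative example of _split_rect on the unit square with no gap (a
# hand-worked sketch, not part of the original module):
#   _split_rect(0, 0, 1, 1, [1, 3], horizontal=True, gap=0)
#   -> [(0.0, 0.0, 0.25, 1.0), (0.25, 0.0, 0.75, 1.0)]
# i.e. two tiles side by side whose widths are in the ratio 1:3.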
def _reduce_dict(count_dict, partial_key):
"""
Make partial sum on a counter dict.
Given a match for the beginning of the category, it will sum each value.
"""
L = len(partial_key)
count = sum(v for k, v in iteritems(count_dict) if k[:L] == partial_key)
return count
def _key_splitting(rect_dict, keys, values, key_subset, horizontal, gap):
"""
Given a dictionary where each entry is a rectangle, and a list of keys and
values (count of elements in each category), it splits each rect accordingly,
as long as the key starts with the tuple key_subset. The other keys are
returned without modification.
"""
result = OrderedDict()
L = len(key_subset)
for name, (x, y, w, h) in iteritems(rect_dict):
if key_subset == name[:L]:
# split based on the values given
divisions = _split_rect(x, y, w, h, values, horizontal, gap)
for key, rect in zip(keys, divisions):
result[name + (key,)] = rect
else:
result[name] = (x, y, w, h)
return result
def _tuplify(obj):
"""convert an object into a tuple of strings (even if it is not iterable,
like a single integer number), but keep a string whole instead of splitting it
"""
if np.iterable(obj) and not isinstance(obj, string_types):
res = tuple(str(o) for o in obj)
else:
res = (str(obj),)
return res
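# Illustrative behaviour of _tuplify (hand-worked sketch, not part of the
# original module):
#   _tuplify(1)        -> ('1',)        # scalars are wrapped
#   _tuplify('ab')     -> ('ab',)       # strings are kept whole, not split
#   _tuplify((1, 'b')) -> ('1', 'b')    # other iterables are stringified element-wise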
def _categories_level(keys):
"""use an OrderedDict to implement a simple ordered set and
return each level of each category:
[[key_1_level_1,key_2_level_1],[key_1_level_2,key_2_level_2]]
"""
res = []
for i in zip(*(keys)):
tuplefied = _tuplify(i)
res.append(list(OrderedDict([(j, None) for j in tuplefied])))
return res
def _hierarchical_split(count_dict, horizontal=True, gap=0.05):
"""
Split a square in a hierarchical way given a contingency table.
Hierarchically split the unit square in alternate directions
in proportion to the subdivision contained in the contingency table
count_dict. This is the function that actually performs the tiling
for the creation of the mosaic plot. If the gap array has been specified
it will insert a corresponding amount of space (proportional to the
unit length), while retaining the proportionality of the tiles.
Parameters
----------
count_dict : dict
Dictionary containing the contingency table.
Each category should contain a non-negative number
with a tuple as index. It expects all the combinations
of keys to be represented; if that is not true, the missing
values will automatically be considered as 0
horizontal : bool
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
If the length of the given array is less than the number
of subcategories (or if it's a single number) it will be extended
with exponentially decreasing gaps
Returns
----------
base_rect : dict
A dictionary containing the result of the split.
To each key is associated a 4-tuple of coordinates
that are required to create the corresponding rectangle:
0 - x position of the lower left corner
1 - y position of the lower left corner
2 - width of the rectangle
3 - height of the rectangle
"""
# this is the unit square that we are going to divide
base_rect = OrderedDict([(tuple(), (0, 0, 1, 1))])
# get the list of each possible value for each level
categories_levels = _categories_level(list(iterkeys(count_dict)))
L = len(categories_levels)
# recreate the gaps vector starting from an int
if not np.iterable(gap):
gap = [gap / 1.5 ** idx for idx in range(L)]
# extend if it's too short
if len(gap) < L:
last = gap[-1]
gap = list(gap) + [last / 1.5 ** idx for idx in range(L)]
# trim if it's too long
gap = gap[:L]
# put the count dictionary in order for the keys
# this will allow some code simplification
count_ordered = OrderedDict([(k, count_dict[k])
for k in list(product(*categories_levels))])
for cat_idx, cat_enum in enumerate(categories_levels):
# get the partial key up to the actual level
base_keys = list(product(*categories_levels[:cat_idx]))
for key in base_keys:
# for each partial and each value calculate how many
# observations we have in the counting dictionary
part_count = [_reduce_dict(count_ordered, key + (partial,))
for partial in cat_enum]
# reduce the gap for subsequent levels
new_gap = gap[cat_idx]
# split the given subkeys in the rectangle dictionary
base_rect = _key_splitting(base_rect, cat_enum, part_count, key,
horizontal, new_gap)
horizontal = not horizontal
return base_rect
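# Illustrative example of _hierarchical_split with one categorical level and no
# gap (a hand-worked sketch, not part of the original module): the counts
# {('a',): 1, ('b',): 3} tile the unit square into two vertical strips whose
# widths are 0.25 and 0.75:
#   _hierarchical_split({('a',): 1, ('b',): 3}, gap=0)
#   -> OrderedDict([(('a',), (0.0, 0.0, 0.25, 1.0)),
#                   (('b',), (0.25, 0.0, 0.75, 1.0))])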
def _single_hsv_to_rgb(hsv):
"""Transform a color from the hsv space to the rgb."""
from matplotlib.colors import hsv_to_rgb
return hsv_to_rgb(array(hsv).reshape(1, 1, 3)).reshape(3)
def _create_default_properties(data):
"""Create the default properties of the mosaic given the data:
first it varies the color hue (first category), then the color
saturation (second category) and then the color value
(third category). If a fourth category is found, it will put
decoration on the rectangle. Doesn't manage more than four
levels of categories
"""
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
# first level, the hue
L = len(categories_levels[0])
# hue = np.linspace(1.0, 0.0, L+1)[:-1]
hue = np.linspace(0.0, 1.0, L + 2)[:-2]
# second level, the saturation
L = len(categories_levels[1]) if Nlevels > 1 else 1
saturation = np.linspace(0.5, 1.0, L + 1)[:-1]
# third level, the value
L = len(categories_levels[2]) if Nlevels > 2 else 1
value = np.linspace(0.5, 1.0, L + 1)[:-1]
# fourth level, the hatch
L = len(categories_levels[3]) if Nlevels > 3 else 1
hatch = ['', '/', '-', '|', '+'][:L + 1]
# convert in list and merge with the levels
hue = lzip(list(hue), categories_levels[0])
saturation = lzip(list(saturation),
categories_levels[1] if Nlevels > 1 else [''])
value = lzip(list(value),
categories_levels[2] if Nlevels > 2 else [''])
hatch = lzip(list(hatch),
categories_levels[3] if Nlevels > 3 else [''])
# create the properties dictionary
properties = {}
for h, s, v, t in product(hue, saturation, value, hatch):
hv, hn = h
sv, sn = s
vv, vn = v
tv, tn = t
level = (hn,) + ((sn,) if sn else tuple())
level = level + ((vn,) if vn else tuple())
level = level + ((tn,) if tn else tuple())
hsv = array([hv, sv, vv])
prop = {'color': _single_hsv_to_rgb(hsv), 'hatch': tv, 'lw': 0}
properties[level] = prop
return properties
def _normalize_data(data, index):
"""normalize the data to a dict with tuples of strings as keys
right now it works with:
0 - dictionary (or equivalent mappable)
1 - pandas.Series with simple or hierarchical indexes
2 - numpy.ndarrays
3 - everything that can be converted to a numpy array
4 - pandas.DataFrame (via the _normalize_dataframe function)
"""
# if data is a dataframe we need to take a completely new road
# before coming back here. Use the hasattr to avoid importing
# pandas explicitly
if hasattr(data, 'pivot') and hasattr(data, 'groupby'):
data = _normalize_dataframe(data, index)
index = None
# can it be used as a dictionary?
try:
items = list(iteritems(data))
except AttributeError:
# ok, I cannot use the data as a dictionary
# Try to convert it to a numpy array, or die trying
data = np.asarray(data)
temp = OrderedDict()
for idx in np.ndindex(data.shape):
name = tuple(i for i in idx)
temp[name] = data[idx]
data = temp
items = list(iteritems(data))
# make all the keys a tuple, even if simple numbers
data = OrderedDict([_tuplify(k), v] for k, v in items)
categories_levels = _categories_level(list(iterkeys(data)))
# fill the void in the counting dictionary
indexes = product(*categories_levels)
contingency = OrderedDict([(k, data.get(k, 0)) for k in indexes])
data = contingency
# reorder the keys order according to the one specified by the user
# or if the index is None convert it into a simple list
# right now it doesn't do any check, but can be modified in the future
index = lrange(len(categories_levels)) if index is None else index
contingency = OrderedDict()
for key, value in iteritems(data):
new_key = tuple(key[i] for i in index)
contingency[new_key] = value
data = contingency
return data
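# Illustrative example of _normalize_data (hand-worked sketch, not part of the
# original module): plain keys are tuplified and missing key combinations are
# filled with zeros:
#   _normalize_data({'a': 1, 'b': 2}, None)
#   -> OrderedDict([(('a',), 1), (('b',), 2)])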
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
#groupby the given keys, extract the same columns and count the elements
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
return averaged
def _statistical_coloring(data):
"""evaluate colors from the independence properties of the matrix.
It will encounter problems if one category has all zeros
"""
data = _normalize_data(data, None)
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
total = 1.0 * sum(v for v in itervalues(data))
# count the proportion of observation
# for each level that has the given name
# at each level
levels_count = []
for level_idx in range(Nlevels):
proportion = {}
for level in categories_levels[level_idx]:
proportion[level] = 0.0
for key, value in iteritems(data):
if level == key[level_idx]:
proportion[level] += value
proportion[level] /= total
levels_count.append(proportion)
# for each key I obtain the expected value
# and its standard deviation from a binomial distribution
# under the hypothesis of independence
expected = {}
for key, value in iteritems(data):
base = 1.0
for i, k in enumerate(key):
base *= levels_count[i][k]
expected[key] = base * total, np.sqrt(total * base * (1.0 - base))
# now we have the standard deviation of distance from the
# expected value for each tile. We create the colors from this
sigmas = dict((k, (data[k] - m) / s) for k, (m, s) in iteritems(expected))
props = {}
for key, dev in iteritems(sigmas):
red = 0.0 if dev < 0 else (dev / (1 + dev))
blue = 0.0 if dev > 0 else (dev / (-1 + dev))
green = (1.0 - red - blue) / 2.0
hatch = 'x' if dev > 2 else 'o' if dev < -2 else ''
props[key] = {'color': [red, green, blue], 'hatch': hatch}
return props
def _create_labels(rects, horizontal, ax, rotation):
"""find the position of the label for each value of each category;
right now it supports only up to four categories
ax: the axis on which the label should be applied
rotation: the rotation list for each side
"""
categories = _categories_level(list(iterkeys(rects)))
if len(categories) > 4:
msg = ("maximum of 4 levels supported for axes labeling... and 4 "
"is already a lot of levels, are you sure you need them all?")
raise NotImplementedError(msg)
labels = {}
#keep it fixed as will be used a lot of times
items = list(iteritems(rects))
vertical = not horizontal
#get the axis ticks and labels locator to put the correct values!
ax2 = ax.twinx()
ax3 = ax.twiny()
#this is the order of execution for horizontal disposition
ticks_pos = [ax.set_xticks, ax.set_yticks, ax3.set_xticks, ax2.set_yticks]
ticks_lab = [ax.set_xticklabels, ax.set_yticklabels,
ax3.set_xticklabels, ax2.set_yticklabels]
#for the vertical one, rotate it by one
if vertical:
ticks_pos = ticks_pos[1:] + ticks_pos[:1]
ticks_lab = ticks_lab[1:] + ticks_lab[:1]
#clean them
for pos, lab in zip(ticks_pos, ticks_lab):
pos([])
lab([])
#for each level, for each value in the level, take the mean of all
#the sublevel that correspond to that partial key
for level_idx, level in enumerate(categories):
#this dictionary keep the labels only for this level
level_ticks = dict()
for value in level:
#to which level it should refer to get the preceding
#values of labels? it's rather a tricky question...
#this is dependent on the side. It's a very crude management
#but I couldn't think of a more general way...
if horizontal:
if level_idx == 3:
index_select = [-1, -1, -1]
else:
index_select = [+0, -1, -1]
else:
if level_idx == 3:
index_select = [+0, -1, +0]
else:
index_select = [-1, -1, -1]
#now I create the base key name and append the current value
#It will search on all the rects to find the corresponding one
#and use them to evaluate the mean position
basekey = tuple(categories[i][index_select[i]]
for i in range(level_idx))
basekey = basekey + (value,)
subset = dict((k, v) for k, v in items
if basekey == k[:level_idx + 1])
#now I extract the center of all the tiles and make a weighted
#mean of all these center on the area of the tile
#this should give me the (more or less) correct position
#of the center of the category
vals = list(itervalues(subset))
W = sum(w * h for (x, y, w, h) in vals)
x_lab = sum((x + w / 2.0) * w * h / W for (x, y, w, h) in vals)
y_lab = sum((y + h / 2.0) * w * h / W for (x, y, w, h) in vals)
#now, based on the ordering, select which position to keep
#may need to be written in a more general form if 4 levels are not enough
#should give also the horizontal and vertical alignment
side = (level_idx + vertical) % 4
level_ticks[value] = y_lab if side % 2 else x_lab
#now we add the labels of this level to the correct axis
ticks_pos[level_idx](list(itervalues(level_ticks)))
ticks_lab[level_idx](list(iterkeys(level_ticks)),
rotation=rotation[level_idx])
return labels
def mosaic(data, index=None, ax=None, horizontal=True, gap=0.005,
properties=lambda key: None, labelizer=None,
title='', statistic=False, axes_label=True,
label_rotation=0.0):
"""Create a mosaic plot from a contingency table.
It allows one to visualize multivariate categorical data in a rigorous
and informative way.
Parameters
----------
data : dict, pandas.Series, np.ndarray, pandas.DataFrame
The contingency table that contains the data.
Each category should contain a non-negative number
with a tuple as index. It expects all the combinations
of keys to be represented; if that is not true, the missing
values will automatically be considered as 0. The order
of the keys will be the same as the order of insertion.
If a dict or a Series (or any other dict-like object)
is used, it will take the keys as labels. If a
np.ndarray is provided, it will generate simple
numerical labels.
index: list, optional
Gives the preferred order for the category ordering. If not specified
will default to the given order. It doesn't support named indexes
for hierarchical Series. If a DataFrame is provided, it expects
a list with the name of the columns.
ax : matplotlib.Axes, optional
The axes on which to display the mosaic. If not given, a new
figure will be created
horizontal : bool, optional (default True)
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
If the length of the given array is less than the number
of subcategories (or if it's a single number) it will be extended
with exponentially decreasing gaps
labelizer : function (key) -> string, optional
A function that generates the text to display at the center of
each tile based on the key of that tile
properties : function (key) -> dict, optional
A function that for each tile in the mosaic takes the key
of the tile and returns the dictionary of properties
of the generated Rectangle, like color, hatch or similar.
A default properties set will be provided for the keys whose
color has not been defined, and will use color variation to help
visually separate the various categories. It should return None
to indicate that it should use the default property for the tile.
A dictionary of the properties for each key can be passed,
and it will be internally converted to the correct function
statistic: bool, optional (default False)
If True, a crude statistical model will be used to give colors to the plot.
If the tile has a count that is more than 2 standard deviations
from the expected value under the independence hypothesis, it will
go from green to red (for positive deviations, blue otherwise) and
will acquire a hatching when it crosses 3 sigma.
title: string, optional
The title of the axis
axes_label: boolean, optional
Show the name of each value of each category
on the axis (default) or hide them.
label_rotation: float or list of float
the rotation of the axis label (if present). If a list is given
each axis can have a different rotation
Returns
----------
fig : matplotlib.Figure
The generated figure
rects : dict
A dictionary that has the same keys as the original
dataset and holds a reference to the coordinates of the
tile and the Rectangle that represents it
See Also
----------
A Brief History of the Mosaic Display
Michael Friendly, York University, Psychology Department
Journal of Computational and Graphical Statistics, 2001
Mosaic Displays for Loglinear Models.
Michael Friendly, York University, Psychology Department
Proceedings of the Statistical Graphics Section, 1992, 61-68.
Mosaic displays for multi-way contingency tables.
Michael Friendly, York University, Psychology Department
Journal of the american statistical association
March 1994, Vol. 89, No. 425, Theory and Methods
Examples
----------
The most simple use case is to take a dictionary and plot the result
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> mosaic(data, title='basic dictionary')
>>> pylab.show()
A more useful example is given by a dictionary with multiple indices.
In this case we use a wider gap to get a better visual separation of the
resulting plot
>>> data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}
>>> mosaic(data, gap=0.05, title='complete dictionary')
>>> pylab.show()
The same data can be given as a simple or hierarchical indexed Series
>>> rand = np.random.random
>>> from itertools import product
>>>
>>> tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))
>>> index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
>>> data = pd.Series(rand(8), index=index)
>>> mosaic(data, title='hierarchical index series')
>>> pylab.show()
The third accepted data structure is the np array, for which a
very simple index will be created.
>>> rand = np.random.random
>>> data = 1+rand((2,2))
>>> mosaic(data, title='random non-labeled array')
>>> pylab.show()
If you need to modify the labeling and the coloring you can give
a function to create the labels and one for the graphical properties
starting from the key tuple
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> props = lambda key: {'color': 'r' if 'a' in key else 'gray'}
>>> labelizer = lambda k: {('a',): 'first', ('b',): 'second',
('c',): 'third'}[k]
>>> mosaic(data, title='colored dictionary',
properties=props, labelizer=labelizer)
>>> pylab.show()
Using a DataFrame as source, specifying the name of the columns of interest
>>> gender = ['male', 'male', 'male', 'female', 'female', 'female']
>>> pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
>>> data = pandas.DataFrame({'gender': gender, 'pet': pet})
>>> mosaic(data, ['pet', 'gender'])
>>> pylab.show()
"""
if isinstance(data, DataFrame) and index is None:
raise ValueError("You must pass an index if data is a DataFrame."
" See examples.")
from pylab import Rectangle
fig, ax = utils.create_mpl_ax(ax)
# normalize the data to a dict with tuple of strings as keys
data = _normalize_data(data, index)
# split the graph into different areas
rects = _hierarchical_split(data, horizontal=horizontal, gap=gap)
# if there is no specified way to create the labels
# create a default one
if labelizer is None:
labelizer = lambda k: "\n".join(k)
if statistic:
default_props = _statistical_coloring(data)
else:
default_props = _create_default_properties(data)
if isinstance(properties, dict):
color_dict = properties
properties = lambda key: color_dict.get(key, None)
for k, v in iteritems(rects):
# create each rectangle and put a label on it
x, y, w, h = v
conf = properties(k)
props = conf if conf else default_props[k]
text = labelizer(k)
Rect = Rectangle((x, y), w, h, label=text, **props)
ax.add_patch(Rect)
ax.text(x + w / 2, y + h / 2, text, ha='center',
va='center', size='smaller')
#creating the labels on the axis
#or clearing them
if axes_label:
if np.iterable(label_rotation):
rotation = label_rotation
else:
rotation = [label_rotation] * 4
labels = _create_labels(rects, horizontal, ax, rotation)
else:
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_title(title)
return fig, rects
| bsd-3-clause |
buckiracer/data-science-from-scratch | RefMaterials/code-python3/gradient_descent.py | 12 | 5816 | from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
from functools import reduce
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
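# Illustrative check of difference_quotient (hand-worked sketch, not part of the
# original module): for f(x) = x**2 the derivative at x = 3 is 6, and the forward
# difference approximates it:
#   difference_quotient(lambda x: x * x, 3, h=1e-5)  # ~6.00001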
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, list(map(derivative, x)), 'rx') # red x
plt.plot(x, list(map(derivative_estimate, x)), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
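# Illustrative check of estimate_gradient (hand-worked sketch, not part of the
# original module): the gradient of sum_of_squares at v is 2*v, so the numerical
# estimate should be close to it:
#   estimate_gradient(sum_of_squares, [1.0, 2.0])  # ~[2.00001, 4.00001]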
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = list(zip(x, y))
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
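# Illustrative sketch of how minimize_stochastic is meant to be called (the
# per-point functions below are hypothetical examples, not part of this module):
# for a least-squares fit y ~ theta[0] + theta[1] * x,
#   squared_error = lambda x_i, y_i, theta: (y_i - theta[0] - theta[1] * x_i) ** 2
#   squared_error_gradient = lambda x_i, y_i, theta: [
#       -2 * (y_i - theta[0] - theta[1] * x_i),
#       -2 * (y_i - theta[0] - theta[1] * x_i) * x_i]
#   theta = minimize_stochastic(squared_error, squared_error_gradient,
#                               xs, ys, theta_0=[0.0, 0.0])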
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
if __name__ == "__main__":
print("using the gradient")
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print("minimum v", v)
print("minimum value", sum_of_squares(v))
print()
print("using minimize_batch")
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print("minimum v", v)
print("minimum value", sum_of_squares(v))
| unlicense |
mblondel/scikit-learn | sklearn/metrics/cluster/supervised.py | 21 | 26876 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings are matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
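# Illustrative example of contingency_matrix (hand-worked sketch, not part of
# the original module):
#   contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#   -> array([[1, 1],
#             [0, 2]])
# row i counts how many samples of true class i fall into predicted cluster j.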
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari: float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
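# Illustrative values (hand-worked sketch, not part of the original module):
# a perfect labeling scores (1, 1, 1), while collapsing everything into one
# cluster is complete but not homogeneous:
#   homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 1, 1])  # (1.0, 1.0, 1.0)
#   homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 0, 0])  # (0.0, 1.0, 0.0)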
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but unnecessary splits harm completeness
and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
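# Illustrative value (hand-worked sketch, not part of the original module):
# two identical two-cluster labelings share log(2) nats of information:
#   mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])  # ~0.6931 (natural log of 2)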
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
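# Illustrative values (hand-worked sketch, not part of the original module):
# entropy is computed in nats, so two equally likely labels give log(2):
#   entropy([0, 0, 1, 1])  # ~0.6931
#   entropy([0, 0, 0, 0])  # 0.0 (a single label carries no information)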
| bsd-3-clause |
CISprague/Astro.IQ | src/Optimisation.py | 1 | 12782 | '''
Astro.IQ - Optimisation
Christopher Iliffe Sprague
[email protected]
https://cisprague.github.io/Astro.IQ
'''
''' --------
Dependencies
-------- '''
from numpy import *
from numpy.linalg import *
from scipy.interpolate import *
from Trajectory import *
from PyGMO.problem import base
from PyGMO import *
import matplotlib.pyplot as plt
set_printoptions(suppress=True)
''' ---------
Approximation
--------- '''
''' ------------
Solution Guesses
------------ '''
class Guess(object):
def __init__(self, prob):
self.prob = prob
def Mid(self, nlp=True):
z = self.prob.dlb + 0.5*(self.prob.dub - self.prob.dlb)
if nlp: return z
else: return self.prob.Decode(z)
def Linear(self, lb, ub):
mesh = linspace(0, 1, self.prob.nnodes)
x = array([mesh[0], mesh[-1]])
a = []
for yi, yt in zip(lb, ub):
y = array([yi, yt])
z = interp1d(x, y)
a.append(z(mesh))
return transpose(array(a))
def Cubic(mesh, yi, dyi, yt, dyt):
ni, nt = mesh[0], mesh[-1]
A = array([
[1, ni, ni**2, ni**3],
[0, 1, 2*ni, 3*ni**2],
[1, nt, nt**2, nt**3],
[0, 1, 2*nt, 3*nt**2]
])
c = inv(A).dot(array([yi, dyi, yt, dyt]))
return c[0] + c[1]*mesh + c[2]*mesh**2 + c[3]*mesh**3
def Ballistic(self, si=None, tf=None, nlp=True):
if si is None: si = self.prob.model.si
if tf is None: tf = self.prob.model.tub
s = self.prob.model.Propagate.Ballistic(
si=si, tf=tf, nnodes=self.prob.nnodes
)
c = zeros((self.prob.nnodes, self.prob.model.cdim))
if nlp:
if self.prob.nc == 1 and self.prob.ns == 1:
return self.prob.Code(tf, s, c)
elif self.prob.nc == 2 and self.prob.ns == 1:
return self.prob.Code(tf, c, s, c)
elif self.prob.nc == 2 and self.prob.ns == 2:
return self.prob.Code(tf, s, c, s, c)
else:
return tf, s
''' ----------------------
Direct Collocation Methods
---------------------- '''
class Direct(object):
def __init__(self, model, nsegs, ns, nc):
self.nsegs = int(nsegs) # Number of segments
self.nnodes = int(nsegs + 1) # Number of nodes
        self.ns     = int(ns)        # Number of states per segment
        self.nc     = int(nc)        # Number of controls per segment
self.model = model # The dynamical model
self.Guess = Guess(self) # Guessing methods
class S1C1(Direct):
def __init__(self, model, nsegs):
Direct.__init__(self, model, nsegs, 1, 1)
self.dim = 1 + (model.sdim + model.cdim)*self.nnodes
self.consdim = model.sdim*nsegs + 2*model.sdim - 1
self.nobj = 1
self.dlb = array(
[model.tlb] + (list(model.slb) + list(model.clb))*self.nnodes
, float)
self.dub = array(
[model.tub] + (list(model.sub) + list(model.cub))*self.nnodes
, float)
base.__init__(self, self.dim, 0, self.nobj, self.consdim, 0, 1e-12)
self.set_bounds(self.dlb, self.dub)
def Decode(self, z):
tf = z[0]
z = array(z[1:])
z = z.reshape((self.nnodes, self.model.sdim + self.model.cdim))
s = z[:, 0:self.model.sdim]
c = z[:, self.model.sdim:self.model.sdim+self.model.cdim]
return tf, s, c
def Code(self, tf, s, c):
z = hstack((s, c))
z = list(z.reshape((self.model.sdim + self.model.cdim)*self.nnodes))
z = array([tf] + z)
return z
class S1C2(Direct):
def __init__(self, model, nsegs):
Direct.__init__(self, model, nsegs, 1, 2)
self.dim = 1 + model.sdim + model.cdim + (model.cdim*2 + model.sdim)*nsegs
self.consdim = model.sdim*nsegs + 2*model.sdim - 1
self.dlb = array(
[model.tlb] + list(model.slb) + list(model.clb) + (list(model.clb) + list(model.slb) + list(model.clb))*nsegs
, float)
self.dub = array(
[model.tub] + list(model.sub) + list(model.cub) + (list(model.cub) + list(model.sub) + list(model.cub))*nsegs
, float)
base.__init__(self, self.dim, 0, 1, self.consdim, 0, 1e-8)
self.set_bounds(self.dlb, self.dub)
def Decode(self, z):
tf = z[0]
z = hstack((zeros(self.model.cdim), z[1:]))
z = z.reshape((self.nnodes, self.model.cdim*2 + self.model.sdim))
cb = z[:,0:3] # Midpoint control
s = z[:,3:8] # States
c = z[:,8:11] # Nodal control
return tf, cb, s, c
def Code(self, tf, cb, s, c):
z = hstack((cb, s, c))
z = z.reshape((self.model.cdim*2 + self.model.sdim)*self.nnodes)
z = list(z[self.model.cdim:]) # Remove midpoint control placeholder
return array([tf] + z)
class S2C2(Direct):
def __init__(self, model, nsegs):
Direct.__init__(self, model, nsegs, 2, 2)
        # Guess tf and midpoint/node states and controls
self.zdim = 1 + model.sdim + model.cdim + (model.sdim + model.cdim)*2*nsegs
# Dynamics and boundary conditions, excluding final mass (-1)
self.condim = model.sdim*2*nsegs + 2*model.sdim - 1
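        # Decision vector layout (as implied by Code/Decode below):
        #   z = [tf, s_0, c_0, (sbar_k, cbar_k, s_k, c_k) for k = 1..nsegs]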
# Initialise PyGMO problem
base.__init__(self, self.zdim, 0, 1, self.condim, 0, 1e-8)
# Initialise decision bound vectors
self.zlb, self.zub = empty(self.zdim), empty(self.zdim)
i, j = 0, 1
self.zlb[i:j], self.zub[i:j] = model.tlb, model.tub
i, j = j, j + model.sdim
self.zlb[i:j], self.zub[i:j] = model.slb, model.sub
i, j = j, j + model.cdim
self.zlb[i:j], self.zub[i:j] = model.clb, model.cub
for i in range(nsegs):
i, j = j, j + model.sdim
self.zlb[i:j], self.zub[i:j] = model.slb, model.sub
i, j = j, j + model.cdim
self.zlb[i:j], self.zub[i:j] = model.clb, model.cub
i, j = j, j + model.sdim
self.zlb[i:j], self.zub[i:j] = model.slb, model.sub
i, j = j, j + model.cdim
self.zlb[i:j], self.zub[i:j] = model.clb, model.cub
self.set_bounds(self.zlb, self.zub)
def Decode(self, z):
tf = z[0]
# Decision vector without tf
z = array(z[1:])
        # Add dummy sbar and cbar for consistent indexing
b = zeros(self.model.sdim + self.model.cdim)
z = hstack((b, z))
z = z.reshape((self.nnodes, (self.model.sdim + self.model.cdim)*2))
i, j = 0, self.model.sdim
sb = z[:,i:j]
i, j = j, j + self.model.cdim
cb = z[:,i:j]
i, j = j, j + self.model.sdim
s = z[:,i:j]
i, j = j, j + self.model.cdim
c = z[:,i:j]
return tf, sb, cb, s, c
def Code(self, tf, sb, cb, s, c):
z = hstack((sb, cb, s, c))
z = z.flatten()
        # Remove the dummy state and control
z = z[self.model.sdim + self.model.cdim:]
# Insert the final time
z = hstack((tf, z))
return z
class Euler(S1C1, base):
    def __init__(self, model=Point_Lander(), nsegs=20):
        S1C1.__init__(self, model, nsegs)
def _objfun_impl(self, z):
tf, s, c = self.Decode(z)
        return (-s[-1,-1],)  # maximise final mass (PyGMO minimises)
def _compute_constraints_impl(self, z):
tf, s, c = self.Decode(z)
h = tf/self.nnodes
# Boundary
ceq = list(s[0] - self.model.si)
        ceq += list(s[-1,:-1] - self.model.st[:-1])
        # Dynamics
        for k in range(self.nsegs):
            ceq += list(s[k+1] - s[k] - h*self.model.EOM_State(s[k], c[k]))
return ceq
class Trapezoidal(S1C1, base):
def __init__(self, model=Point_Lander(), nsegs=20):
S1C1.__init__(self, model, nsegs)
def _objfun_impl(self, z):
tf, s, c = self.Decode(z)
return (-s[-1, -1],)
def _compute_constraints_impl(self, z):
tf, s, c = self.Decode(z)
h = tf/self.nnodes
# Boundary
ceq = list(s[0] - self.model.si)
ceq += list(s[-1,:-1] - self.model.st[:-1])
# Dynamics
for k in range(self.nsegs):
f1 = self.model.EOM_State(s[k], c[k])
f2 = self.model.EOM_State(s[k+1], c[k+1])
ceq += list(s[k+1] - s[k] - h/2.*(f1 + f2))
return ceq
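    # For reference, the defect enforced above is the implicit trapezoidal rule
    #   s[k+1] - s[k] = h/2 * (f(s[k], c[k]) + f(s[k+1], c[k+1])),
    # a second-order collocation scheme.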
class Runge_Kutta(S1C2, base):
    def __init__(self, model=Point_Lander(), nsegs=20):
        S1C2.__init__(self, model, nsegs)
def _objfun_impl(self, z):
tf, cb, s, c = self.Decode(z)
return(-s[-1,-1],)
def _compute_constraints_impl(self, z):
tf, cb, s, c = self.Decode(z)
h = tf/self.nnodes
# Boundary
ceq = list(s[0] - self.model.si) # Mass must match
ceq += list(s[-1,:-1] - self.model.st[:-1]) # Mass free
        # Dynamics
for k in range(self.nsegs):
k1 = h*self.model.EOM_State(s[k], c[k])
k2 = h*self.model.EOM_State(s[k] + 0.5*k1, cb[k+1])
k3 = h*self.model.EOM_State(s[k] + 0.5*k2, cb[k+1])
k4 = h*self.model.EOM_State(s[k] + k3, c[k+1])
ceq += list(s[k+1] - s[k] - 1/6.*(k1 + 2*k2 + 2*k3 + k4))
return ceq
class Hermite_Simpson_Compressed(S1C2, base):
def __init__(self, model=Point_Lander(), nsegs=20):
S1C2.__init__(self, model, nsegs)
def _objfun_impl(self, z):
tf, cb, s, c = self.Decode(z)
return (-s[-1,-1],)
def _compute_constraints_impl(self, z):
tf, cb, s, c = self.Decode(z)
h = tf/self.nnodes
# Boundary
ceq = list(s[0] - self.model.si)
ceq += list(s[-1,:-1] - self.model.st[:-1])
        # Dynamics
for k in range(self.nsegs):
f1 = self.model.EOM_State(s[k], c[k])
f2 = self.model.EOM_State(s[k+1], c[k+1])
            # Hermite interpolation of the midpoint state uses (f1 - f2)
            sb2 = 0.5*(s[k] + s[k+1]) + h/8.*(f1 - f2)
            fb2 = self.model.EOM_State(sb2, cb[k+1])
ceq += list(s[k+1] - s[k] -h/6.*(f1 + 4*fb2 + f2))
return ceq
class HSS(S2C2, base):
def __init__(self, model=Point_Lander(), nsegs=20):
S2C2.__init__(self, model, nsegs)
def _objfun_impl(self, z):
tf, sb, cb, s, c = self.Decode(z)
return (-s[-1,-1],)
def _compute_constraints_impl(self, z):
tf, sb, cb, s, c = self.Decode(z)
h = tf/self.nnodes
ceq = zeros(self.condim, float)
i, j = 0, self.model.sdim
ceq[i:j] = s[0] - self.model.si
i, j = j, j + self.model.sdim - 1
ceq[i:j] = s[-1,:-1] - self.model.st[:-1]
# Dynamics
for k in range(self.nsegs):
f1 = self.model.EOM_State(s[k], c[k])
f2 = self.model.EOM_State(s[k+1], c[k+1] )
fb2 = self.model.EOM_State(sb[k+1], cb[k+1])
i, j = j, j + self.model.sdim
ceq[i:j] = sb[k+1] - 0.5*(s[k+1] + s[k]) - h/8.*(f1-f2)
i, j = j, j + self.model.sdim
ceq[i:j] = s[k+1] - s[k] - h/6.*(f2 + 4*fb2 + f1)
return ceq
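    # The two defects above are the Hermite-Simpson "separated" form:
    #   sbar_k  = (s_k + s_{k+1})/2 + h/8*(f_k - f_{k+1})   (Hermite interpolation)
    #   s_{k+1} = s_k + h/6*(f_k + 4*fbar_k + f_{k+1})      (Simpson quadrature)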
''' -----------
Indirect Method
----------- '''
class Indirect_Shooting(base):
def __init__(self, model=Point_Lander(), nnodes=50):
self.model = model
self.nnodes = nnodes
self.dim = model.sdim + 1 # Costates and tf
        self.cdim = model.sdim + 1 # Boundary + transversality conditions
base.__init__(self, self.dim, 0, 1, self.cdim, 0, 1e-8)
self.set_bounds(
[-1e10]*model.sdim + [model.tlb],
[ 1e10]*model.sdim + [model.tub]
)
def _objfun_impl(self, z):
return (1.,)
def _compute_constraints_impl(self, z):
tf = z[-1]
li = z[:-1]
fsi = hstack((self.model.si, li))
t, fs, c = self.model.Propagate.Indirect(fsi, tf, self.nnodes)
# The final state, costate, and control
fsf = fs[-1]
sf = fsf[0:self.model.sdim]
lf = fsf[self.model.sdim:self.model.sdim*2]
cf = c[-1]
# Must land softly on target
ceq = list(sf[:-1] - self.model.st[:-1])
# Mass is free
ceq += [lf[-1]]
# Time is free
ceq += [self.model.Hamiltonian(fsf, cf)]
return ceq
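        # (Transversality, as encoded above: because the final mass is free its
        #  costate must vanish, and because tf is free the Hamiltonian must be
        #  zero at the final time.)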
def Decode(self, z):
tf = z[-1]
li = array(z[:-1])
fsi = hstack((self.model.si, li))
return tf, fsi
class Indirect_Multiple_Shooting(base):
def __init__(self, model=Point_Lander(), nnodes=50):
return None
def _objfun_impl(self, z):
return None
def _compute_constraints_impl(self, z):
return None
if __name__ == "__main__":
mod = Point_Lander()
prob = Indirect_Shooting(mod)
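    # Hedged usage sketch (not part of the original script): with PyGMO 1.x one
    # would typically attach a local NLP solver and evolve a population, e.g.
    #   algo = algorithm.scipy_slsqp()   # assumes SciPy is available
    #   pop  = population(prob, 1)
    #   pop  = algo.evolve(pop)
    #   tf, fsi = prob.Decode(pop.champion.x)
    # The choice of SLSQP is an assumption; any PyGMO algorithm that handles
    # equality constraints could be used instead.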
| mit |
calatre/epidemics_network | plt/SIR 1 plot_maxs_3d.py | 1 | 1859 | # 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 24/5/2017
# Plotting Multiple Simulations of a SIR Epidemic Model
# Based on the US unemployment example on Bokeh Website:
# http://bokeh.pydata.org/en/latest/docs/gallery/unemployment.html
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
#Choosing the values for c and r to study
cvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.25, 0.5, 0.75, 1]
rvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.25, 0.5, 0.75, 1]
# Let's open our previously generated maxima csv file
maxs = pd.read_csv('data/sqr_i_maxs.csv', index_col = 0)
print(maxs) #to check it
# reshape to 1D array
df = pd.DataFrame(maxs.stack()).reset_index()
df.apply(pd.to_numeric)
print(df)  # let's see how it looks
df.round(1) #making sure there's no weird huge numbers
points = df.values
x = np.array(points[:,0].astype(float))
y = np.array(points[:,1].astype(float))
z = np.array(points[:,2])
print(x.shape)
print(y.shape)
print(z.size)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_trisurf(x,y,z, cmap=cm.plasma, linewidth=0.1,
edgecolor = 'White', alpha = 0.8)
#fig.colorbar(surf)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
ax.zaxis.set_major_locator(MaxNLocator(10))
ax.set_xlabel('Removal Probability')
ax.set_ylabel('Contagion Probability')
ax.set_zlabel('Infection Maxima (individuals)')
fig.tight_layout()
plt.show() | apache-2.0 |
0asa/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 30 | 7560 | """
Test the fastica algorithm.
"""
import itertools
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
"""
Test gram schmidt orthonormalization
"""
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
"""Test FastICA.fit_transform"""
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, 10]]:
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components, 10))
assert_equal(Xt.shape, (100, n_components))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
"""Test FastICA.inverse_transform"""
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
Eric89GXL/scikit-learn | sklearn/decomposition/tests/test_pca.py | 4 | 14780 | import warnings
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less, assert_greater
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import ProbabilisticPCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
"""PCA on dense arrays"""
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
def test_whitening():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_pca_check_projection():
"""Test that the projection of data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
"""Test that the projection of data can be inverted"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on dense data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
"""Test that the projection by RandomizedPCA on list data is correct"""
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on dense data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_sparse_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on sparse data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
X = csr_matrix(X)
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Xt = csr_matrix(Xt)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DeprecationWarning)
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
assert_equal(len(w), 1)
assert_equal(w[0].category, DeprecationWarning)
Yt /= np.sqrt((Yt ** 2).sum())
np.testing.assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_sparse_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on sparse data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
# no large means because the sparse version of randomized pca does not do
# centering to avoid breaking the sparsity
X = csr_matrix(X)
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DeprecationWarning)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
assert_equal(len(w), 1)
assert_equal(w[0].category, DeprecationWarning)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X.todense(), Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DeprecationWarning)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
assert_equal(len(w), 1)
assert_equal(w[0].category, DeprecationWarning)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X.todense() - Y_inverse)
/ np.abs(X).mean()).max()
    # XXX: this does not seem to work as expected:
assert_almost_equal(relative_max_delta, 0.91, decimal=2)
def test_pca_dim():
"""Check automated dimensionality setting"""
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
"""
"""
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
"""Test that probabilistic PCA scoring yields a reasonable score"""
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
"""Test that probabilistic PCA correctly separated different datasets"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
"""Check that probabilistic PCA selects the right model"""
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
def test_probabilistic_pca_1():
"""Test that probabilistic PCA yields a reasonable score"""
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1.mean() / h, 1, 0)
def test_probabilistic_pca_2():
"""Test that probabilistic PCA correctly separated different datasets"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
ll2 = ppca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1.mean(), ll2.mean())
def test_probabilistic_pca_3():
"""The homoscedastic model should work slightly worse
than the heteroscedastic one in over-fitting condition
"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
ppca.fit(X, homoscedastic=False)
ll2 = ppca.score(X)
# XXX : Don't test as homoscedastic=False is buggy
# Comment to be removed with ProbabilisticPCA is removed
def test_probabilistic_pca_4():
"""Check that ppca select the right model"""
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
ppca = ProbabilisticPCA(n_components=k)
ppca.fit(Xl)
ll[k] = ppca.score(Xt).mean()
assert_true(ll.argmax() == 1)
def test_probabilistic_pca_vs_pca():
"""Test that PCA matches ProbabilisticPCA with homoscedastic=True
"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2).fit(X)
ppca = ProbabilisticPCA(n_components=2).fit(X)
assert_array_almost_equal(pca.score_samples(X), ppca.score(X))
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/sandbox/examples/try_smoothers.py | 39 | 2655 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 01 15:17:52 2011
Author: Mike
Author: Josef
mainly script for checking Kernel Regression
"""
import numpy as np
if __name__ == "__main__":
#from statsmodels.sandbox.nonparametric import smoothers as s
from statsmodels.sandbox.nonparametric import smoothers, kernels
import matplotlib.pyplot as plt
#from numpy import sin, array, random
import time
np.random.seed(500)
nobs = 250
sig_fac = 0.5
#x = np.random.normal(size=nobs)
x = np.random.uniform(-2, 2, size=nobs)
#y = np.array([np.sin(i*5)/i + 2*i + (3+i)*np.random.normal() for i in x])
y = np.sin(x*5)/x + 2*x + sig_fac * (3+x)*np.random.normal(size=nobs)
K = kernels.Biweight(0.25)
K2 = kernels.CustomKernel(lambda x: (1 - x*x)**2, 0.25, domain = [-1.0,
1.0])
KS = smoothers.KernelSmoother(x, y, K)
KS2 = smoothers.KernelSmoother(x, y, K2)
KSx = np.arange(-3, 3, 0.1)
start = time.time()
KSy = KS.conf(KSx)
KVar = KS.std(KSx)
print(time.time() - start) # This should be significantly quicker...
start = time.time() #
KS2y = KS2.conf(KSx) #
K2Var = KS2.std(KSx) #
print(time.time() - start) # ...than this.
KSConfIntx, KSConfInty = KS.conf(15)
print("Norm const should be 0.9375")
print(K2.norm_const)
print("L2 Norms Should Match:")
print(K.L2Norm)
print(K2.L2Norm)
print("Fit values should match:")
#print zip(KSy, KS2y)
print(KSy[28])
print(KS2y[28])
print("Var values should match:")
#print zip(KVar, K2Var)
print(KVar[39])
print(K2Var[39])
fig = plt.figure()
ax = fig.add_subplot(221)
ax.plot(x, y, "+")
ax.plot(KSx, KSy, "-o")
#ax.set_ylim(-20, 30)
ax2 = fig.add_subplot(222)
ax2.plot(KSx, KVar, "-o")
ax3 = fig.add_subplot(223)
ax3.plot(x, y, "+")
ax3.plot(KSx, KS2y, "-o")
#ax3.set_ylim(-20, 30)
ax4 = fig.add_subplot(224)
ax4.plot(KSx, K2Var, "-o")
fig2 = plt.figure()
ax5 = fig2.add_subplot(111)
ax5.plot(x, y, "+")
ax5.plot(KSConfIntx, KSConfInty, "-o")
import statsmodels.nonparametric.smoothers_lowess as lo
ys = lo.lowess(y, x)
ax5.plot(ys[:,0], ys[:,1], 'b-')
ys2 = lo.lowess(y, x, frac=0.25)
ax5.plot(ys2[:,0], ys2[:,1], 'b--', lw=2)
#need to sort for matplolib plot ?
xind = np.argsort(x)
pmod = smoothers.PolySmoother(5, x[xind])
pmod.fit(y[xind])
yp = pmod(x[xind])
ax5.plot(x[xind], yp, 'k-')
ax5.set_title('Kernel regression, lowess - blue, polysmooth - black')
#plt.show()
| bsd-3-clause |
erh3cq/hyperspy | hyperspy/tests/learn/test_learning_results.py | 3 | 1749 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy.misc.machine_learning.import_sklearn import sklearn_installed
from hyperspy.signals import Signal1D
def test_learning_results_decom():
rng = np.random.RandomState(123)
s1 = Signal1D(rng.random_sample(size=(20, 100)))
s1.decomposition(output_dimension=2)
out = str(s1.learning_results)
assert "Decomposition parameters" in out
assert "algorithm=SVD" in out
assert "output_dimension=2" in out
assert "Demixing parameters" not in out
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_learning_results_bss():
rng = np.random.RandomState(123)
s1 = Signal1D(rng.random_sample(size=(20, 100)))
s1.decomposition(output_dimension=2)
s1.blind_source_separation(number_of_components=2)
out = str(s1.learning_results)
assert "Decomposition parameters" in out
assert "Demixing parameters" in out
assert "algorithm=sklearn_fastica" in out
assert "n_components=2" in out
| gpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/linear_model/plot_lasso_and_elasticnet.py | 1 | 2888 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# #############################################################################
# Generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
# #############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
# #############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
# plt.show()
pltshow(plt)
| mit |
fyffyt/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
jarn0ld/gnuradio | gr-digital/examples/example_fll.py | 49 | 5715 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fll(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_fll = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.vsnk_err = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.fll,1), self.vsnk_frq)
self.connect((self.fll,2), self.vsnk_phs)
self.connect((self.fll,3), self.vsnk_err)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.2,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_fll(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_err = scipy.array(put.vsnk_err.data())
# Convert the FLL's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data. There are 2 filters of
# ntaps long and the channel introduces another 4 sample delay.
data_fll = scipy.array(put.vsnk_fll.data()[2*options.ntaps-4:])
# Plot the FLL's LO frequency
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("FLL LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the FLL's error
s2 = f1.add_subplot(2,2,2)
s2.plot(data_err)
s2.set_title("FLL Error")
s2.set_xlabel("Samples")
s2.set_ylabel("FLL Loop error")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,3)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_fll.real, data_fll.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
# Plot the symbols in time
s4 = f1.add_subplot(2,2,4)
s4.plot(data_src.real, "o-")
s4.plot(data_fll.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
southpaw94/MachineLearning | DimensionalityReduction/lda_sk.py | 1 | 1276 | from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from plots import plot_decision_regions
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
np.set_printoptions(precision=4)
pca = PCA(n_components = 2)
lr = LogisticRegression()
data = pd.read_csv('Wine.csv', header=None)
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
scaler = StandardScaler()
X_train_std = scaler.fit_transform(X_train)
X_test_std = scaler.transform(X_test)
lda = LDA(n_components = 2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
lr = LogisticRegression()
lr = lr.fit(X_train_lda, y_train)
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.title('Training Data')
plt.show()
X_test_lda = lda.transform(X_test_std)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.title('Test Data')
plt.show()
| gpl-2.0 |
mhdella/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
lakshayg/tensorflow | tensorflow/examples/learn/boston.py | 75 | 2549 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
# Load dataset
boston = datasets.load_boston()
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = model_selection.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column('x', shape=np.array(x_train).shape[1:])]
regressor = tf.estimator.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': x_train}, y=y_train, batch_size=1, num_epochs=None, shuffle=True)
regressor.train(input_fn=train_input_fn, steps=2000)
# Predict.
x_transformed = scaler.transform(x_test)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': x_transformed}, y=y_test, num_epochs=1, shuffle=False)
predictions = regressor.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['predictions'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score_sklearn = metrics.mean_squared_error(y_predicted, y_test)
print('MSE (sklearn): {0:f}'.format(score_sklearn))
# Score with tensorflow.
scores = regressor.evaluate(input_fn=test_input_fn)
print('MSE (tensorflow): {0:f}'.format(scores['average_loss']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
aashish24/seaborn | seaborn/tests/test_palettes.py | 3 | 8431 | import colorsys
import numpy as np
import matplotlib as mpl
import nose.tools as nt
import numpy.testing as npt
from .. import palettes, utils, rcmod, husl
from ..xkcd_rgb import xkcd_rgb
class TestColorPalettes(object):
def test_current_palette(self):
pal = palettes.color_palette(["red", "blue", "green"], 3)
rcmod.set_palette(pal, 3)
nt.assert_equal(pal, mpl.rcParams["axes.color_cycle"])
rcmod.set()
def test_palette_context(self):
default_pal = palettes.color_palette()
context_pal = palettes.color_palette("muted")
with palettes.color_palette(context_pal):
nt.assert_equal(mpl.rcParams["axes.color_cycle"], context_pal)
nt.assert_equal(mpl.rcParams["axes.color_cycle"], default_pal)
def test_big_palette_context(self):
default_pal = palettes.color_palette()
context_pal = palettes.color_palette("husl", 10)
with palettes.color_palette(context_pal, 10):
nt.assert_equal(mpl.rcParams["axes.color_cycle"], context_pal)
nt.assert_equal(mpl.rcParams["axes.color_cycle"], default_pal)
def test_seaborn_palettes(self):
pals = "deep", "muted", "pastel", "bright", "dark", "colorblind"
for name in pals:
pal_out = palettes.color_palette(name)
nt.assert_equal(len(pal_out), 6)
def test_hls_palette(self):
hls_pal1 = palettes.hls_palette()
hls_pal2 = palettes.color_palette("hls")
npt.assert_array_equal(hls_pal1, hls_pal2)
def test_husl_palette(self):
husl_pal1 = palettes.husl_palette()
husl_pal2 = palettes.color_palette("husl")
npt.assert_array_equal(husl_pal1, husl_pal2)
def test_mpl_palette(self):
mpl_pal1 = palettes.mpl_palette("Reds")
mpl_pal2 = palettes.color_palette("Reds")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_mpl_dark_palette(self):
mpl_pal1 = palettes.mpl_palette("Blues_d")
mpl_pal2 = palettes.color_palette("Blues_d")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_bad_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("IAmNotAPalette")
def test_terrible_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("jet")
def test_bad_palette_colors(self):
pal = ["red", "blue", "iamnotacolor"]
with nt.assert_raises(ValueError):
palettes.color_palette(pal)
def test_palette_desat(self):
pal1 = palettes.husl_palette(6)
pal1 = [utils.desaturate(c, .5) for c in pal1]
pal2 = palettes.color_palette("husl", desat=.5)
npt.assert_array_equal(pal1, pal2)
def test_palette_is_list_of_tuples(self):
pal_in = np.array(["red", "blue", "green"])
pal_out = palettes.color_palette(pal_in, 3)
nt.assert_is_instance(pal_out, list)
nt.assert_is_instance(pal_out[0], tuple)
nt.assert_is_instance(pal_out[0][0], float)
nt.assert_equal(len(pal_out[0]), 3)
def test_palette_cycles(self):
deep = palettes.color_palette("deep")
double_deep = palettes.color_palette("deep", 12)
nt.assert_equal(double_deep, deep + deep)
def test_hls_values(self):
pal1 = palettes.hls_palette(6, h=0)
pal2 = palettes.hls_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.hls_palette(5, l=.2)
pal_bright = palettes.hls_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.hls_palette(5, s=.1)
pal_bold = palettes.hls_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_husl_values(self):
pal1 = palettes.husl_palette(6, h=0)
pal2 = palettes.husl_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.husl_palette(5, l=.2)
pal_bright = palettes.husl_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.husl_palette(5, s=.1)
pal_bold = palettes.husl_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_cbrewer_qual(self):
pal_short = palettes.mpl_palette("Set1", 4)
pal_long = palettes.mpl_palette("Set1", 6)
nt.assert_equal(pal_short, pal_long[:4])
pal_full = palettes.mpl_palette("Set2", 8)
pal_long = palettes.mpl_palette("Set2", 10)
nt.assert_equal(pal_full, pal_long[:8])
def test_mpl_reversal(self):
pal_forward = palettes.mpl_palette("BuPu", 6)
pal_reverse = palettes.mpl_palette("BuPu_r", 6)
nt.assert_equal(pal_forward, pal_reverse[::-1])
def test_rgb_from_hls(self):
color = .5, .8, .4
rgb_got = palettes._color_to_rgb(color, "hls")
rgb_want = colorsys.hls_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_husl(self):
color = 120, 50, 40
rgb_got = palettes._color_to_rgb(color, "husl")
rgb_want = husl.husl_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_xkcd(self):
color = "dull red"
rgb_got = palettes._color_to_rgb(color, "xkcd")
rgb_want = xkcd_rgb[color]
nt.assert_equal(rgb_got, rgb_want)
def test_light_palette(self):
pal_forward = palettes.light_palette("red")
pal_reverse = palettes.light_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.light_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_dark_palette(self):
pal_forward = palettes.dark_palette("red")
pal_reverse = palettes.dark_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.dark_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_blend_palette(self):
colors = ["red", "yellow", "white"]
pal_cmap = palettes.blend_palette(colors, as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_cubehelix_against_matplotlib(self):
x = np.linspace(0, 1, 8)
mpl_pal = mpl.cm.cubehelix(x)[:, :3].tolist()
sns_pal = palettes.cubehelix_palette(8, start=0.5, rot=-1.5, hue=1,
dark=0, light=1, reverse=True)
nt.assert_list_equal(sns_pal, mpl_pal)
def test_cubehelix_n_colors(self):
for n in [3, 5, 8]:
pal = palettes.cubehelix_palette(n)
nt.assert_equal(len(pal), n)
def test_cubehelix_reverse(self):
pal_forward = palettes.cubehelix_palette()
pal_reverse = palettes.cubehelix_palette(reverse=True)
nt.assert_list_equal(pal_forward, pal_reverse[::-1])
def test_cubehelix_cmap(self):
cmap = palettes.cubehelix_palette(as_cmap=True)
nt.assert_is_instance(cmap, mpl.colors.ListedColormap)
pal = palettes.cubehelix_palette()
x = np.linspace(0, 1, 6)
npt.assert_array_equal(cmap(x)[:, :3], pal)
cmap_rev = palettes.cubehelix_palette(as_cmap=True, reverse=True)
x = np.linspace(0, 1, 6)
pal_forward = cmap(x).tolist()
pal_reverse = cmap_rev(x[::-1]).tolist()
nt.assert_list_equal(pal_forward, pal_reverse)
def test_xkcd_palette(self):
names = list(xkcd_rgb.keys())[10:15]
colors = palettes.xkcd_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, xkcd_rgb[name])
| bsd-3-clause |
AnasGhrab/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/frame/test_alter_axes.py | 7 | 26538 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Index, MultiIndex,
RangeIndex)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameAlterAxes(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_set_index(self):
idx = Index(np.arange(len(self.mixed_frame)))
# cache it
_ = self.mixed_frame['foo'] # noqa
self.mixed_frame.index = idx
self.assertIs(self.mixed_frame['foo'].index, idx)
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]},
index=[2010, 2011, 2012])
expected = df.ix[2010]
new_index = df.index.astype(np.int32)
df.index = new_index
result = df.ix[2010]
assert_series_equal(result, expected)
def test_set_index2(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
# new object, single-column
result = df.set_index('C')
result_nodrop = df.set_index('C', drop=False)
index = Index(df['C'], name='C')
expected = df.ix[:, ['A', 'B', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.name, index.name)
# inplace, single
df2 = df.copy()
df2.set_index('C', inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index('C', drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# create new object, multi-column
result = df.set_index(['A', 'B'])
result_nodrop = df.set_index(['A', 'B'], drop=False)
index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
expected = df.ix[:, ['C', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.names, index.names)
# inplace
df2 = df.copy()
df2.set_index(['A', 'B'], inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index(['A', 'B'], drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# corner case
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# append
result = df.set_index(['A', 'B'], append=True)
xp = df.reset_index().set_index(['index', 'A', 'B'])
xp.index.names = [None, 'A', 'B']
assert_frame_equal(result, xp)
# append to existing multiindex
rdf = df.set_index(['A'], append=True)
rdf = rdf.set_index(['B', 'C'], append=True)
expected = df.set_index(['A', 'B', 'C'], append=True)
assert_frame_equal(rdf, expected)
# Series
result = df.set_index(df.C)
self.assertEqual(result.index.name, 'C')
def test_set_index_nonuniq(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True, inplace=True)
self.assertIn('A', df)
def test_set_index_bug(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
df2 = df.select(lambda indx: indx >= 1)
rs = df2.set_index('key')
xp = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# multiple columns
result = df.set_index(['A', df['B'].values], drop=False)
expected = df.set_index(['A', 'B'], drop=False)
# TODO should set_index check_names ?
assert_frame_equal(result, expected, check_names=False)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
# with Categorical
df = DataFrame({'A': np.random.randn(10),
'B': ci.values})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
# from a CategoricalIndex
df = DataFrame({'A': np.random.randn(10),
'B': ci})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
idf = df.set_index('B').reset_index().set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
new_df = idf.reset_index()
new_df.index = df.B
tm.assert_index_equal(new_df.index, ci, check_names=False)
self.assertEqual(idf.index.name, 'B')
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
tm.assertIsInstance(idf.index, pd.DatetimeIndex)
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
i = (pd.DatetimeIndex(
pd.tseries.tools.to_datetime(['2013-1-1 13:00',
'2013-1-2 14:00'], errors="raise"))
.tz_localize('US/Pacific'))
df = DataFrame(np.random.randn(2, 1), columns=['A'])
expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800',
tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800',
tz='US/Pacific')],
dtype="object"))
# convert index to series
result = Series(i)
assert_series_equal(result, expected)
        # assign to frame
df['B'] = i
result = df['B']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'B')
# keep the timezone
result = i.to_series(keep_tz=True)
assert_series_equal(result.reset_index(drop=True), expected)
# convert to utc
df['C'] = i.to_series().reset_index(drop=True)
result = df['C']
comp = pd.DatetimeIndex(expected.values).copy()
comp.tz = None
self.assert_numpy_array_equal(result.values, comp.values)
# list of datetimes with a tz
df['D'] = i.to_pydatetime()
result = df['D']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'D')
# GH 6785
# set the index manually
import pytz
df = DataFrame(
[{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
assert_frame_equal(df, expected)
# GH 3950
# reset_index with single level
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('1/1/2011', periods=5,
freq='D', tz=tz, name='idx')
df = pd.DataFrame(
{'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx': [datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(
lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
i = pd.to_datetime(["2014-01-01 10:10:10"],
utc=True).tz_convert('Europe/Rome')
df = DataFrame({'i': i})
self.assertEqual(df.set_index(i).index[0].hour, 11)
self.assertEqual(pd.DatetimeIndex(pd.Series(df.i))[0].hour, 11)
self.assertEqual(df.set_index(df.i).index[0].hour, 11)
def test_set_index_dst(self):
di = pd.date_range('2006-10-29 00:00:00', periods=3,
                           freq='H', tz='US/Pacific')
df = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=di).reset_index()
# single level
res = df.set_index('index')
exp = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=pd.Index(di, name='index'))
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(['index', 'a'])
exp_index = pd.MultiIndex.from_arrays([di, [0, 1, 2]],
names=['index', 'a'])
exp = pd.DataFrame({'b': [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
rs = df.set_index(df.columns[0])
xp = df.ix[:, 1:]
xp.index = df.ix[:, 0].values
xp.index.names = [df.columns[0]]
assert_frame_equal(rs, xp)
def test_set_index_empty_column(self):
# #1971
df = DataFrame([
dict(a=1, p=0),
dict(a=2, m=10),
dict(a=3, m=11, p=20),
dict(a=4, m=12, p=21)
], columns=('a', 'm', 'p', 'x'))
# it works!
result = df.set_index(['a', 'x'])
repr(result)
def test_set_columns(self):
cols = Index(np.arange(len(self.mixed_frame.columns)))
self.mixed_frame.columns = cols
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.columns = cols[::2]
# Renaming
def test_rename(self):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = self.frame.rename(columns=mapping)
renamed2 = self.frame.rename(columns=str.lower)
assert_frame_equal(renamed, renamed2)
assert_frame_equal(renamed2.rename(columns=str.upper),
self.frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index, pd.Index(['foo', 'bar']))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, pd.Index(['BAR', 'FOO']))
# have to pass something
self.assertRaises(TypeError, self.frame.rename)
# partial columns
renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.columns,
pd.Index(['A', 'B', 'foo', 'bar']))
# other axis
renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.index,
pd.Index(['A', 'B', 'foo', 'bar']))
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index,
pd.Index(['bar', 'foo'], name='name'))
self.assertEqual(renamed.index.name, renamer.index.name)
# MultiIndex
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(
tuples_columns, names=['fizz', 'buzz'])
renamer = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar3')],
names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
self.assert_index_equal(renamed.index, new_index)
self.assert_index_equal(renamed.columns, new_columns)
self.assertEqual(renamed.index.names, renamer.index.names)
self.assertEqual(renamed.columns.names, renamer.columns.names)
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
self.assertTrue((self.frame['C'] == 1.).all())
def test_rename_inplace(self):
self.frame.rename(columns={'C': 'foo'})
self.assertIn('C', self.frame)
self.assertNotIn('foo', self.frame)
c_id = id(self.frame['C'])
frame = self.frame.copy()
frame.rename(columns={'C': 'foo'}, inplace=True)
self.assertNotIn('C', frame)
self.assertIn('foo', frame)
self.assertNotEqual(id(frame['foo']), c_id)
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]})
df = df.rename(columns={0: 'a'})
df = df.rename(columns={1: 'b'})
df = df.set_index(['a', 'b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1], [2]],
index=MultiIndex.from_tuples(
[('foo', 'bah'), ('bar', 'bas')],
names=['a', 'b']),
columns=['2001-01-01'])
assert_frame_equal(df, expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
assert_frame_equal(result, expected)
def test_reset_index(self):
stacked = self.frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(deleveled['first'], deleveled2['level_0'],
check_names=False)
tm.assert_series_equal(deleveled['second'], deleveled2['level_1'],
check_names=False)
# default name assigned
rdf = self.frame.reset_index()
exp = pd.Series(self.frame.index.values, name='index')
self.assert_series_equal(rdf['index'], exp)
# default name assigned, corner case
df = self.frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
exp = pd.Series(self.frame.index.values, name='level_0')
self.assert_series_equal(rdf['level_0'], exp)
# but this is ok
self.frame.index.name = 'index'
deleveled = self.frame.reset_index()
self.assert_series_equal(deleveled['index'],
pd.Series(self.frame.index))
self.assert_index_equal(deleveled.index,
pd.Index(np.arange(len(deleveled))))
# preserve column names
self.frame.columns.name = 'columns'
resetted = self.frame.reset_index()
self.assertEqual(resetted.columns.name, 'columns')
# only remove certain columns
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index(['A', 'B'])
# TODO should reset_index check_names ?
assert_frame_equal(rs, self.frame, check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index('A')
xp = self.frame.reset_index().set_index(['index', 'B'])
assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = self.frame.copy()
resetted = self.frame.reset_index()
df.reset_index(inplace=True)
assert_frame_equal(df, resetted, check_names=False)
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index('A', drop=True)
xp = self.frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
resetted = df.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index('a', )
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = pd.DataFrame([[1, 2], [3, 4]],
columns=pd.date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = pd.DataFrame([[0, 0], [1, 1]], columns=['A', 'B'],
index=RangeIndex(stop=2))
result = df.reset_index()
tm.assertIsInstance(result.index, RangeIndex)
expected = pd.DataFrame([[0, 0, 0], [1, 1, 1]],
columns=['index', 'A', 'B'],
index=RangeIndex(stop=2))
assert_frame_equal(result, expected)
def test_set_index_names(self):
df = pd.util.testing.makeDataFrame()
df.index.name = 'name'
self.assertEqual(df.set_index(df.index).index.names, ['name'])
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'A', 'B'])
df = df.set_index(['A', 'B'])
self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])
# Check that set_index isn't converting a MultiIndex into an Index
self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
self.assertTrue(isinstance(df.set_index(
[df.index, df.index]).index, MultiIndex))
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_rename_objects(self):
renamed = self.mixed_frame.rename(columns=str.upper)
self.assertIn('FOO', renamed)
self.assertNotIn('foo', renamed)
def test_assign_columns(self):
self.frame['hi'] = 'there'
frame = self.frame.copy()
frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
assert_series_equal(self.frame['C'], frame['baz'], check_names=False)
assert_series_equal(self.frame['hi'], frame['foo2'], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
| gpl-3.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/core/magics/pylab.py | 4 | 6517 | """Implementation of magic functions for matplotlib/pylab support.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from traitlets.config.application import Application
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from warnings import warn
from IPython.core.pylabtools import backends
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
magic_gui_arg = magic_arguments.argument(
'gui', nargs='?',
help="""Name of the matplotlib backend to use %s.
If given, the corresponding matplotlib backend is used,
otherwise it will be matplotlib's default
(which you can set in your matplotlib config file).
""" % str(tuple(sorted(backends.keys())))
)
@magics_class
class PylabMagics(Magics):
"""Magics related to matplotlib's pylab support"""
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument('-l', '--list', action='store_true',
help='Show available matplotlib backends')
@magic_gui_arg
def matplotlib(self, line=''):
"""Set up matplotlib to work interactively.
This function lets you activate matplotlib interactive support
at any point during an IPython session. It does not import anything
into the interactive namespace.
If you are using the inline matplotlib backend in the IPython Notebook
you can set which figure formats are enabled using the following::
In [1]: from IPython.display import set_matplotlib_formats
In [2]: set_matplotlib_formats('pdf', 'svg')
The default for inline figures sets `bbox_inches` to 'tight'. This can
cause discrepancies between the displayed image and the identical
image created using `savefig`. This behavior can be disabled using the
`%config` magic::
In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
In addition, see the docstring of
`IPython.display.set_matplotlib_formats` and
`IPython.display.set_matplotlib_close` for more information on
changing additional behaviors of the inline backend.
Examples
--------
To enable the inline backend for usage with the IPython Notebook::
In [1]: %matplotlib inline
In this case, where the matplotlib default is TkAgg::
In [2]: %matplotlib
Using matplotlib backend: TkAgg
But you can explicitly request a different GUI backend::
In [3]: %matplotlib qt
You can list the available backends using the -l/--list option::
In [4]: %matplotlib --list
Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
'gtk', 'tk', 'inline']
"""
args = magic_arguments.parse_argstring(self.matplotlib, line)
if args.list:
backends_list = list(backends.keys())
print("Available matplotlib backends: %s" % backends_list)
else:
gui, backend = self.shell.enable_matplotlib(args.gui)
self._show_matplotlib_backend(args.gui, backend)
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'--no-import-all', action='store_true', default=None,
help="""Prevent IPython from performing ``import *`` into the interactive namespace.
You can govern the default behavior of this flag with the
InteractiveShellApp.pylab_import_all configurable.
"""
)
@magic_gui_arg
def pylab(self, line=''):
"""Load numpy and matplotlib to work interactively.
This function lets you activate pylab (matplotlib, numpy and
interactive support) at any point during an IPython session.
%pylab makes the following imports::
import numpy
import matplotlib
from matplotlib import pylab, mlab, pyplot
np = numpy
plt = pyplot
from IPython.display import display
from IPython.core.pylabtools import figsize, getfigs
from pylab import *
from numpy import *
If you pass `--no-import-all`, the last two `*` imports will be excluded.
See the %matplotlib magic for more details about activating matplotlib
without affecting the interactive namespace.
"""
args = magic_arguments.parse_argstring(self.pylab, line)
if args.no_import_all is None:
# get default from Application
if Application.initialized():
app = Application.instance()
try:
import_all = app.pylab_import_all
except AttributeError:
import_all = True
else:
# nothing specified, no app - default True
import_all = True
else:
# invert no-import flag
import_all = not args.no_import_all
gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
self._show_matplotlib_backend(args.gui, backend)
print ("Populating the interactive namespace from numpy and matplotlib")
if clobbered:
warn("pylab import has clobbered these variables: %s" % clobbered +
"\n`%matplotlib` prevents importing * from pylab and numpy"
)
def _show_matplotlib_backend(self, gui, backend):
"""show matplotlib message backend message"""
if not gui or gui == 'auto':
print("Using matplotlib backend: %s" % backend)
| bsd-2-clause |
mjudsp/Tsallis | sklearn/tests/test_pipeline.py | 23 | 15392 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA(svd_solver='full')
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = TransfT()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
| bsd-3-clause |
CTSRD-SOAAP/chromium-42.0.2311.135 | native_client/pnacl/deps_update.py | 1 | 6837 | #!/usr/bin/env python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool helps with updating Git commit IDs in the
pnacl/COMPONENT_REVISIONS file to the latest commits. It creates a
Rietveld code review for the update, listing the new Git commits.
This tool should be run from a Git checkout of native_client.
"""
import optparse
import os
import re
import subprocess
import sys
def MatchKey(data, key):
# Search for "key=value" line in the COMPONENT_REVISIONS file.
# Also, the keys have underscores instead of dashes.
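  # For example, the 'pnacl-gcc' component would be looked up as a line of
  # the form "pnacl_gcc=<git commit id>" (illustrative, not a real entry).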
key = key.replace('-', '_')
match = re.search('^%s=(\S+)?$' % key, data, re.M)
if match is None:
raise Exception('Key %r not found' % key)
return match
def GetDepsField(data, key):
match = MatchKey(data, key)
return match.group(1)
def SetDepsField(data, key, value):
match = MatchKey(data, key)
return ''.join([data[:match.start(1)],
value,
data[match.end(1):]])
# Returns the Git commit ID for the latest revision on the given
# branch in the given Git repository.
def GetNewRev(git_dir, branch):
subprocess.check_call(['git', 'fetch'], cwd=git_dir)
output = subprocess.check_output(['git', 'rev-parse', branch], cwd=git_dir)
return output.strip()
# Extracts some information about new Git commits. Returns a tuple
# containing:
# * log: list of strings summarising the Git commits
# * authors: list of author e-mail addresses (each a string)
# * bugs: list of "BUG=..." strings to put in a commit message
def GetLog(git_dir, new_rev, old_revs):
log_args = [new_rev] + ['^' + rev for rev in old_revs]
log_data = subprocess.check_output(
['git', 'log', '--pretty=format:%h: (%ae) %s'] + log_args, cwd=git_dir)
authors_data = subprocess.check_output(
['git', 'log', '--pretty=%ae'] + log_args, cwd=git_dir)
full_log = subprocess.check_output(
['git', 'log', '--pretty=%B'] + log_args, cwd=git_dir)
log = [line + '\n' for line in reversed(log_data.strip().split('\n'))]
authors = authors_data.strip().split('\n')
bugs = []
for line in reversed(full_log.split('\n')):
if line.startswith('BUG='):
bug = line[4:].strip()
bug_line = 'BUG= %s\n' % bug
if bug_line not in bugs and bug.lower() != 'none':
bugs.append(bug_line)
if len(bugs) == 0:
bugs = ['BUG=none\n']
return log, authors, bugs
def AssertNoUncommittedChanges():
# Check for uncommitted changes. Note that this can still lose
# changes that have been committed to a detached-HEAD branch, but
# those should be retrievable via the reflog. This can also lose
# changes that have been staged to the index but then undone in the
# working files.
changes = subprocess.check_output(['git', 'diff', '--name-only', 'HEAD'])
if len(changes) != 0:
raise AssertionError('You have uncommitted changes:\n%s' % changes)
def Main(args):
parser = optparse.OptionParser('%prog [options]\n\n' + __doc__.strip())
parser.add_option('--list', action='store_true', default=False,
dest='list_only',
help='Only list the new Git revisions to be pulled in')
parser.add_option('-c', '--component', default='llvm', type='string',
help='Subdirectory of pnacl/git/ to update '
'COMPONENT_REVISIONS from (defaults to "llvm")')
parser.add_option('-r', '--revision', default=None, type='string',
help='Git revision to use')
parser.add_option('-u', '--no-upload', action='store_true', default=False,
help='Do not run "git cl upload"')
options, args = parser.parse_args(args)
if len(args) != 0:
parser.error('Got unexpected arguments')
component_name_map = {'llvm': 'LLVM',
'clang': 'Clang',
'pnacl-gcc': 'GCC',
'binutils': 'Binutils',
'libcxx': 'libc++',
'libcxxabi': 'libc++abi',
'llvm-test-suite': 'LLVM test suite',
'pnacl-newlib': 'Newlib',
'compiler-rt': 'compiler-rt',
'subzero': 'Subzero'}
src_base = 'toolchain_build/src'
git_dir = os.path.join(src_base, options.component)
component_name = component_name_map.get(options.component, options.component)
if options.component == 'pnacl-gcc':
pnacl_branch = 'origin/pnacl'
upstream_branches = []
elif options.component == 'binutils':
pnacl_branch = 'origin/pnacl/2.24/master'
upstream_branches = ['origin/ng/2.24/master']
elif options.component == 'subzero':
pnacl_branch = 'origin/master'
upstream_branches = []
else:
pnacl_branch = 'origin/master'
# Skip changes merged (but not cherry-picked) from upstream git.
upstream_branches = ['origin/upstream/master']
if not options.list_only:
AssertNoUncommittedChanges()
new_rev = options.revision
if new_rev is None:
new_rev = GetNewRev(git_dir, pnacl_branch)
subprocess.check_call(['git', 'fetch'])
deps_file = 'pnacl/COMPONENT_REVISIONS'
deps_data = subprocess.check_output(['git', 'cat-file', 'blob',
'origin/master:%s' % deps_file])
old_rev = GetDepsField(deps_data, options.component)
if new_rev == old_rev:
raise AssertionError('No new changes!')
deps_data = SetDepsField(deps_data, options.component, new_rev)
msg_logs, authors, bugs = GetLog(git_dir, new_rev,
[old_rev] + upstream_branches)
msg = 'PNaCl: Update %s revision in %s' % (component_name, deps_file)
msg += '\n\nThis pulls in the following %s %s:\n\n' % (
component_name,
{True: 'change', False: 'changes'}[len(msg_logs) == 1])
msg += ''.join(msg_logs)
msg += '\n'
msg += ''.join(bugs)
msg += 'TEST= PNaCl toolchain trybots\n'
print msg
cc_list = ', '.join(sorted(set(authors)))
print 'CC:', cc_list
if options.list_only:
return
subprocess.check_call(['git', 'checkout', 'origin/master'])
branch_name = '%s-deps-%s' % (options.component, new_rev[:8])
subprocess.check_call(['git', 'checkout', '-b', branch_name, 'origin/master'])
with open(deps_file, 'w') as fh:
fh.write(deps_data)
subprocess.check_call(['git', 'commit', '-a', '-m', msg])
if options.no_upload:
return
environ = os.environ.copy()
environ['EDITOR'] = 'true'
# TODO(mseaborn): This can ask for credentials when the cached
# credentials expire, so could fail when automated. Can we fix
# that?
subprocess.check_call(['git', 'cl', 'upload', '-m', msg, '--cc', cc_list],
env=environ)
if __name__ == '__main__':
Main(sys.argv[1:])
| bsd-3-clause |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/projections/__init__.py | 3 | 2213 | from geo import AitoffAxes, HammerAxes, LambertAxes, MollweideAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
"""
Manages the set of projections available to the system.
"""
def __init__(self):
self._all_projection_types = {}
def register(self, *projections):
"""
Register a new set of projection(s).
"""
for projection in projections:
name = projection.name
self._all_projection_types[name] = projection
def get_projection_class(self, name):
"""
Get a projection class from its *name*.
"""
return self._all_projection_types[name]
def get_projection_names(self):
"""
Get a list of the names of all projections currently
registered.
"""
names = self._all_projection_types.keys()
names.sort()
return names
projection_registry = ProjectionRegistry()
projection_registry.register(
axes.Axes,
PolarAxes,
AitoffAxes,
HammerAxes,
LambertAxes,
MollweideAxes)
def register_projection(cls):
projection_registry.register(cls)
def get_projection_class(projection=None):
"""
Get a projection class from its name.
If *projection* is None, a standard rectilinear projection is
returned.
"""
if projection is None:
projection = 'rectilinear'
try:
return projection_registry.get_projection_class(projection)
except KeyError:
raise ValueError("Unknown projection '%s'" % projection)
def projection_factory(projection, figure, rect, **kwargs):
"""
Get a new projection instance.
*projection* is a projection name.
*figure* is a figure to add the axes to.
*rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
the location of the axes within the figure.
Any other kwargs are passed along to the specific projection
constructor being used.
"""
return get_projection_class(projection)(figure, rect, **kwargs)
def get_projection_names():
"""
Get a list of acceptable projection names.
"""
return projection_registry.get_projection_names()
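# Hedged usage sketch (not part of the original module): resolving a projection
# by name and registering a custom one. The 'my_proj' name and MyAxes class are
# invented for illustration -- any Axes subclass with a ``name`` attribute works.
if __name__ == '__main__':
    print(get_projection_names())           # e.g. ['aitoff', 'hammer', ..., 'rectilinear']
    print(get_projection_class('polar'))    # -> PolarAxes
    class MyAxes(PolarAxes):
        name = 'my_proj'
    register_projection(MyAxes)
    print(get_projection_class('my_proj') is MyAxes)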
| gpl-3.0 |
openbermuda/karmapi | karmapi/sunny.py | 1 | 1858 | from karmapi import pigfarm
import curio
import random
from pathlib import Path
class Sunspot(pigfarm.MagicCarpet):
def compute_data(self):
pass
def plot(self):
jup = 11.86
nep = 164.8
sat = 29.4571
x = (1/jup - 1/sat)
jupsat = 1/(2 * x)
x = (1/jup - 1/nep)
jupnep = 1/(2 * x)
jupsat, jupnep
period = [jupsat, 10.87, jup, 11.07]
phase = [2000.475, 2002.364, 1999.381, 2009]
weight = [0.83, 1.0, 0.55, 0.25]
import datetime
import pandas
import math
from karmapi import base
infile = Path('~/devel/karmapi/notebooks/SN_m_tot_V2.0.csv').expanduser()
df = pandas.read_csv(
infile,
names=['year', 'month', 'time', 'sunspot', 'sd', 'status'],
sep=';',
header=None,
index_col=False)
def add_date(x):
# FIXME -- turn time into day
return datetime.date(int(x.year), int(x.month), 1)
df.index = df.apply(add_date, axis=1)
df.index = pandas.date_range(
datetime.date(int(df.index[0].year), int(df.index[0].month), 1),
periods=len(df), freq='M')
df['year2'] = pandas.np.linspace(1749, 2016.67, 3212)
pi = math.pi
cos = pandas.np.cos
for ii in range(4):
df['h{}'.format(ii + 1)] = weight[ii] * cos((2 * pi) * ((df.year2 - phase[ii]) / period[ii]))
df['model'] = df.h1 + df.h2 + df.h3 + df.h4
df['guess'] = df.model.clip_lower(0.0) * 150
self.axes.hold(True)
self.axes.plot(df['guess'] / 2.0, 'b')
self.axes.plot((df.h3 * 20) -10, 'g')
self.axes.plot((df.h2 * 20) -40,'k')
self.axes.plot((df.sunspot / 2) + 100,'r')
| gpl-3.0 |
JPMoresmau/aifh | vol3/vol3-python-examples/examples/example_iris.py | 2 | 4131 | #!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import numpy as np
import matplotlib.pyplot as plt
from lib.aifh.util import *
import types
from sklearn import svm, datasets
import sklearn
import scipy.stats
import numpy as np
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.nonlinearities import sigmoid
from lasagne.nonlinearities import softmax
from lasagne.nonlinearities import rectify
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
# Compute a z-score with a mean and standard deviation different than the provided matrix.
def zscore(x,mean,sdev):
return (x-mean)/sdev
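# Hedged illustration (numbers invented, not taken from the iris data): with
# mean=[5.8, 3.0, 3.8, 1.2] and sdev=[0.8, 0.4, 1.8, 0.8], the raw row
# [5.1, 3.5, 1.4, 0.2] maps elementwise to roughly [-0.88, 1.25, -1.33, -1.25].
# NumPy broadcasting applies the same shift and scale to every row of a matrix.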
# Define the structure of the neural network
layers0 = [('input', InputLayer),
('dense0', DenseLayer),
('output', DenseLayer)]
net0 = NeuralNet(layers=layers0,
input_shape=(None, 4),
dense0_num_units=50,
dense0_nonlinearity = rectify,
output_num_units=3,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
regression=False,
eval_size=0.0,
verbose=1,
max_epochs=100000,
on_epoch_finished=[
EarlyStopping(patience=20)
]
)
# Get the iris dataset from scipy
iris = datasets.load_iris()
# Split the iris dataset into 25% validation, and 75% train. Also shuffle with a seed of 42.
X_train, X_validate, y_train, y_validate = sklearn.cross_validation.train_test_split(
iris.data,iris.target, test_size = 0.25, random_state = 42)
# Calculate the mean and standard deviation vectors (all 4 measurements) for training data.
train_mean = np.mean(X_train, axis=0)
train_sdev = np.std(X_train, axis=0)
# Compute the z-scores for both train and validation. However, use the training
# mean and standard deviation for both. This is customary because the network is
# trained against data standardized this way. Additionally, our prediction set
# might be too small to calculate a meaningful mean and standard deviation.
X_train_z = zscore(X_train, train_mean, train_sdev) #scipy.stats.mstats.zscore(X_train)
X_validate_z = zscore(X_validate, train_mean, train_sdev) #scipy.stats.mstats.zscore(X_validate)
#These can be used to check my zscore calc to numpy
#print(X_train_z)
#print(scipy.stats.mstats.zscore(X_train))
# Provide our own validation set
def my_split(self, X, y, eval_size):
return X_train_z,X_validate_z,y_train,y_validate
net0.train_test_split = types.MethodType(my_split, net0)
# Train the network
net0.fit(X_train_z,y_train)
# Predict the validation set
pred_y = net0.predict(X_validate_z)
# Display predictions and count the number of incorrect predictions.
species_names = ['setosa','versicolour','virginica']
count = 0
wrong = 0
for element in zip(X_validate,y_validate,pred_y):
print("Input: sepal length: {}, sepal width: {}, petal length: {}, petal width: {}; Expected: {}; Actual: {}".format(
element[0][0],element[0][1],element[0][2],element[0][3],
species_names[element[1]],
species_names[element[2]]))
if element[1] != element[2]:
wrong = wrong + 1
count = count + 1
print("Incorrect {}/{} ({}%)".format(wrong,count,(wrong/count)*100))
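# Hedged addition (not part of the original example): a confusion matrix is a
# compact summary of the same per-class errors counted by the loop above.
# It only assumes scikit-learn, which this script already imports.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_validate, pred_y))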
| apache-2.0 |
dvro/imbalanced-learn | imblearn/over_sampling/smote.py | 2 | 21877 | """Class to perform over-sampling using SMOTE."""
from __future__ import print_function
from __future__ import division
import numpy as np
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.neighbors import NearestNeighbors
from sklearn.svm import SVC
from ..base import BaseBinarySampler
SMOTE_KIND = ('regular', 'borderline1', 'borderline2', 'svm')
class SMOTE(BaseBinarySampler):
"""Class to perform over-sampling using SMOTE.
This object is an implementation of SMOTE - Synthetic Minority
Over-sampling Technique, and the variants Borderline SMOTE 1, 2 and
SVM-SMOTE.
Parameters
----------
ratio : str or float, optional (default='auto')
If 'auto', the ratio will be defined automatically to balance
the dataset. Otherwise, the ratio is defined as the number
        of samples in the minority class over the number of samples
in the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
k : int, optional (default=5)
        Number of nearest neighbours used to construct synthetic samples.
m : int, optional (default=10)
Number of nearest neighbours to use to determine if a minority sample
is in danger.
out_step : float, optional (default=0.5)
Step size when extrapolating.
kind : str, optional (default='regular')
The type of SMOTE algorithm to use one of the following options:
'regular', 'borderline1', 'borderline2', 'svm'.
Attributes
----------
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
        A dictionary in which the number of occurrences of each class is
reported.
X_shape_ : tuple of int
Shape of the data `X` during fitting.
Notes
-----
See the original papers: [1]_, [2]_, [3]_ for more details.
It does not support multiple classes automatically, but can be called
multiple times.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.over_sampling import SMOTE
>>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
... n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1,
... n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> sm = SMOTE(random_state=42)
>>> X_res, y_res = sm.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({0: 900, 1: 900})
References
----------
.. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
synthetic minority over-sampling technique," Journal of artificial
intelligence research, 321-357, 2002.
.. [2] H. Han, W. Wen-Yuan, M. Bing-Huan, "Borderline-SMOTE: a new
over-sampling method in imbalanced data sets learning," Advances in
intelligent computing, 878-887, 2005.
.. [3] H. M. Nguyen, E. W. Cooper, K. Kamei, "Borderline over-sampling for
imbalanced data classification," International Journal of Knowledge
Engineering and Soft Data Paradigms, 3(1), pp.4-21, 2001.
"""
def __init__(self,
ratio='auto',
random_state=None,
k=5,
m=10,
out_step=0.5,
kind='regular',
n_jobs=-1,
**kwargs):
super(SMOTE, self).__init__(ratio=ratio)
self.random_state = random_state
self.kind = kind
self.k = k
self.m = m
self.out_step = out_step
self.n_jobs = n_jobs
self.kwargs = kwargs
def _in_danger_noise(self, samples, y, kind='danger'):
"""Estimate if a set of sample are in danger or noise.
Parameters
----------
samples : ndarray, shape (n_samples, n_features)
The samples to check if either they are in danger or not.
y : ndarray, shape (n_samples, )
The true label in order to check the neighbour labels.
kind : str, optional (default='danger')
The type of classification to use. Can be either:
- If 'danger', check if samples are in danger,
- If 'noise', check if samples are noise.
Returns
-------
output : ndarray, shape (n_samples, )
A boolean array where True refer to samples in danger or noise.
"""
# Find the NN for each samples
# Exclude the sample itself
x = self.nearest_neighbour.kneighbors(samples,
return_distance=False)[:, 1:]
        # Count how many of the nearest neighbours belong to the majority class
# Find the class corresponding to the label in x
nn_label = (y[x] != self.min_c_).astype(int)
# Compute the number of majority samples in the NN
n_maj = np.sum(nn_label, axis=1)
if kind == 'danger':
# Samples are in danger for m/2 <= m' < m
return np.bitwise_and(n_maj >= float(self.m) / 2.,
n_maj < self.m)
elif kind == 'noise':
# Samples are noise for m = m'
return n_maj == self.m
else:
raise NotImplementedError
def _make_samples(self, X, y_type, nn_data, nn_num, n_samples,
step_size=1.):
"""A support function that returns artificial samples constructed along
the line connecting nearest neighbours.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Points from which the points will be created.
y_type : str or int
The minority target value, just so the function can return the
target values for the synthetic variables with correct length in
a clear format.
nn_data : ndarray, shape (n_samples_all, n_features)
Data set carrying all the neighbours to be used
nn_num : ndarray, shape (n_samples_all, k_nearest_neighbours)
The nearest neighbours of each sample in nn_data.
n_samples : int
The number of samples to generate.
step_size : float, optional (default=1.)
The step size to create samples.
Returns
-------
X_new : ndarray, shape (n_samples_new, n_features)
Synthetically generated samples.
y_new : ndarray, shape (n_samples_new, )
Target values for synthetic samples.
"""
# Check the consistency of X
X = check_array(X)
# Check the random state
random_state = check_random_state(self.random_state)
# A matrix to store the synthetic samples
X_new = np.zeros((n_samples, X.shape[1]))
# # Set seeds
# seeds = random_state.randint(low=0,
# high=100 * len(nn_num.flatten()),
# size=n_samples)
# Randomly pick samples to construct neighbours from
samples = random_state.randint(low=0,
high=len(nn_num.flatten()),
size=n_samples)
# Loop over the NN matrix and create new samples
for i, n in enumerate(samples):
# NN lines relate to original sample, columns to its
# nearest neighbours
row, col = divmod(n, nn_num.shape[1])
# Take a step of random size (0,1) in the direction of the
# n nearest neighbours
# if self.random_state is None:
# np.random.seed(seeds[i])
# else:
# np.random.seed(self.random_state)
step = step_size * random_state.uniform()
# Construct synthetic sample
X_new[i] = X[row] - step * (X[row] -
nn_data[nn_num[row, col]])
# The returned target vector is simply a repetition of the
# minority label
y_new = np.array([y_type] * len(X_new))
self.logger.info('Generated %s new samples ...', len(X_new))
return X_new, y_new
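    # Hedged illustration (not in the original source): with step_size=1.0 the
    # update above places X_new[i] on the segment joining X[row] and the chosen
    # neighbour. For example, x = [1, 1], neighbour = [3, 5] and step = 0.25
    # give [1, 1] - 0.25 * ([1, 1] - [3, 5]) = [1.5, 2.0].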
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
"""
if self.kind not in SMOTE_KIND:
raise ValueError('Unknown kind for SMOTE algorithm.')
random_state = check_random_state(self.random_state)
self._get_smote_kind()
# Define the number of sample to create
# We handle only two classes problem for the moment.
if self.ratio == 'auto':
num_samples = (self.stats_c_[self.maj_c_] -
self.stats_c_[self.min_c_])
else:
num_samples = int((self.ratio * self.stats_c_[self.maj_c_]) -
self.stats_c_[self.min_c_])
# Start by separating minority class features and target values.
X_min = X[y == self.min_c_]
# If regular SMOTE is to be performed
if self.kind == 'regular':
self.logger.debug('Finding the %s nearest neighbours ...', self.k)
# Look for k-th nearest neighbours, excluding, of course, the
# point itself.
self.nearest_neighbour.fit(X_min)
# Matrix with k-th nearest neighbours indexes for each minority
# element.
nns = self.nearest_neighbour.kneighbors(
X_min,
return_distance=False)[:, 1:]
self.logger.debug('Create synthetic samples ...')
# --- Generating synthetic samples
# Use static method make_samples to generate minority samples
X_new, y_new = self._make_samples(X_min,
self.min_c_,
X_min,
nns,
num_samples,
1.0)
# Concatenate the newly generated samples to the original data set
X_resampled = np.concatenate((X, X_new), axis=0)
y_resampled = np.concatenate((y, y_new), axis=0)
return X_resampled, y_resampled
if self.kind == 'borderline1' or self.kind == 'borderline2':
self.logger.debug('Finding the %s nearest neighbours ...', self.m)
# Find the NNs for all samples in the data set.
self.nearest_neighbour.fit(X)
# Boolean array with True for minority samples in danger
danger_index = self._in_danger_noise(X_min, y, kind='danger')
# If all minority samples are safe, return the original data set.
if not any(danger_index):
self.logger.debug('There are no samples in danger. No'
' borderline synthetic samples created.')
# All are safe, nothing to be done here.
return X, y
# If we got here is because some samples are in danger, we need to
# find the NNs among the minority class to create the new synthetic
# samples.
#
# We start by changing the number of NNs to consider from m + 1
# to k + 1
self.nearest_neighbour.set_params(**{'n_neighbors': self.k + 1})
self.nearest_neighbour.fit(X_min)
            # Find the k nearest neighbours of the samples in danger
nns = self.nearest_neighbour.kneighbors(
X_min[danger_index],
return_distance=False)[:, 1:]
# B1 and B2 types diverge here!!!
if self.kind == 'borderline1':
# Create synthetic samples for borderline points.
X_new, y_new = self._make_samples(X_min[danger_index],
self.min_c_,
X_min,
nns,
num_samples)
# Concatenate the newly generated samples to the original
# dataset
X_resampled = np.concatenate((X, X_new), axis=0)
y_resampled = np.concatenate((y, y_new), axis=0)
# Reset the k-neighbours to m+1 neighbours
self.nearest_neighbour.set_params(
**{'n_neighbors': self.m + 1})
return X_resampled, y_resampled
else:
# Split the number of synthetic samples between only minority
# (type 1), or minority and majority (with reduced step size)
# (type 2).
# The fraction is sampled from a beta distribution centered
# around 0.5 with variance ~0.01
fractions = random_state.beta(10, 10)
# Only minority
X_new_1, y_new_1 = self._make_samples(X_min[danger_index],
self.min_c_,
X_min,
nns,
int(fractions *
(num_samples + 1)),
step_size=1.)
# Only majority with smaller step size
X_new_2, y_new_2 = self._make_samples(X_min[danger_index],
self.min_c_,
X[y != self.min_c_],
nns,
int((1 - fractions) *
num_samples),
step_size=0.5)
# Concatenate the newly generated samples to the original
# data set
X_resampled = np.concatenate((X, X_new_1, X_new_2), axis=0)
y_resampled = np.concatenate((y, y_new_1, y_new_2), axis=0)
# Reset the k-neighbours to m+1 neighbours
self.nearest_neighbour.set_params(
**{'n_neighbors': self.m + 1})
return X_resampled, y_resampled
if self.kind == 'svm':
# The SVM smote model fits a support vector machine
# classifier to the data and uses the support vector to
# provide a notion of boundary. Unlike regular smote, where
# such notion relies on proportion of nearest neighbours
# belonging to each class.
            # Fit SVM to the full data set
self.svm.fit(X, y)
# Find the support vectors and their corresponding indexes
support_index = self.svm.support_[y[self.svm.support_] ==
self.min_c_]
support_vector = X[support_index]
# First, find the nn of all the samples to identify samples
# in danger and noisy ones
self.logger.debug('Finding the %s nearest neighbours ...', self.m)
# As usual, fit a nearest neighbour model to the data
self.nearest_neighbour.fit(X)
# Now, get rid of noisy support vectors
noise_bool = self._in_danger_noise(support_vector, y, kind='noise')
# Remove noisy support vectors
support_vector = support_vector[np.logical_not(noise_bool)]
danger_bool = self._in_danger_noise(support_vector, y,
kind='danger')
safety_bool = np.logical_not(danger_bool)
self.logger.debug('Out of %s support vectors, %s are noisy, '
'%s are in danger '
'and %s are safe.',
support_vector.shape[0],
noise_bool.sum().astype(int),
danger_bool.sum().astype(int),
safety_bool.sum().astype(int))
# Proceed to find support vectors NNs among the minority class
self.logger.debug('Finding the %s nearest neighbours ...', self.k)
self.nearest_neighbour.set_params(**{'n_neighbors': self.k + 1})
self.nearest_neighbour.fit(X_min)
self.logger.debug('Create synthetic samples ...')
# Split the number of synthetic samples between interpolation and
# extrapolation
            # The fraction is sampled from a beta distribution with mean
            # 0.5 and variance ~0.01
fractions = random_state.beta(10, 10)
# Interpolate samples in danger
if np.count_nonzero(danger_bool) > 0:
nns = self.nearest_neighbour.kneighbors(
support_vector[danger_bool],
return_distance=False)[:, 1:]
X_new_1, y_new_1 = self._make_samples(
support_vector[danger_bool],
self.min_c_,
X_min,
nns,
int(fractions * (num_samples + 1)),
step_size=1.)
# Extrapolate safe samples
if np.count_nonzero(safety_bool) > 0:
nns = self.nearest_neighbour.kneighbors(
support_vector[safety_bool],
return_distance=False)[:, 1:]
X_new_2, y_new_2 = self._make_samples(
support_vector[safety_bool],
self.min_c_,
X_min,
nns,
int((1 - fractions) * num_samples),
step_size=-self.out_step)
# Concatenate the newly generated samples to the original data set
if (np.count_nonzero(danger_bool) > 0 and
np.count_nonzero(safety_bool) > 0):
X_resampled = np.concatenate((X, X_new_1, X_new_2), axis=0)
y_resampled = np.concatenate((y, y_new_1, y_new_2), axis=0)
# not any support vectors in danger
elif np.count_nonzero(danger_bool) == 0:
X_resampled = np.concatenate((X, X_new_2), axis=0)
y_resampled = np.concatenate((y, y_new_2), axis=0)
# All the support vector in danger
elif np.count_nonzero(safety_bool) == 0:
X_resampled = np.concatenate((X, X_new_1), axis=0)
y_resampled = np.concatenate((y, y_new_1), axis=0)
# Reset the k-neighbours to m+1 neighbours
self.nearest_neighbour.set_params(**{'n_neighbors': self.m + 1})
return X_resampled, y_resampled
def _get_smote_kind(self):
# --- NN object
# Import the NN object from scikit-learn library. Since in the smote
# variations we must first find samples that are in danger, we
# initialize the NN object differently depending on the method chosen
if self.kind == 'regular':
# Regular smote does not look for samples in danger, instead it
# creates synthetic samples directly from the k-th nearest
            # neighbours without any filtering
self.nearest_neighbour = NearestNeighbors(n_neighbors=self.k + 1,
n_jobs=self.n_jobs)
else:
# Borderline1, 2 and SVM variations of smote must first look for
# samples that could be considered noise and samples that live
# near the boundary between the classes. Therefore, before
            # creating synthetic samples from the k-th nns, it first looks
            # for the m nearest neighbours to decide whether a sample is
# noise or near the boundary.
self.nearest_neighbour = NearestNeighbors(n_neighbors=self.m + 1,
n_jobs=self.n_jobs)
# --- SVM smote
# Unlike the borderline variations, the SVM variation uses the support
# vectors to decide which samples are in danger (near the boundary).
# Additionally it also introduces extrapolation for samples that are
# considered safe (far from boundary) and interpolation for samples
# in danger (near the boundary). The level of extrapolation is
        # controlled by the out_step.
if self.kind == 'svm':
# Store SVM object with any parameters
self.svm = SVC(random_state=self.random_state, **self.kwargs)
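# Hedged usage sketch (not part of the library module, runnable e.g. via
# ``python -m imblearn.over_sampling.smote``): exercise each SMOTE variant on a
# synthetic imbalanced problem, mirroring the example in the class docstring.
if __name__ == '__main__':
    from collections import Counter
    from sklearn.datasets import make_classification
    X, y = make_classification(n_classes=2, weights=[0.1, 0.9], n_features=20,
                               n_samples=1000, random_state=10)
    for kind in ('regular', 'borderline1', 'borderline2', 'svm'):
        X_res, y_res = SMOTE(kind=kind, random_state=42).fit_sample(X, y)
        print(kind, Counter(y_res))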
| mit |
MechCoder/scikit-learn | examples/mixture/plot_gmm_covariances.py | 89 | 4724 | """
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMMs are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
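# Hedged note (not in the original example): eigh() above factors each 2x2
# covariance into principal axes; the eigenvectors give the ellipse orientation
# and the square roots of the eigenvalues scale its axis lengths, so each
# ellipse is drawn at a fixed multiple of the standard deviation per component.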
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((cov_type, GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0))
for cov_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
andaag/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
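# Hedged addition (not part of the original example): the inlier mask also
# reports how many of the points RANSAC ended up trusting.
print("RANSAC kept %d of %d samples as inliers"
      % (inlier_mask.sum(), n_samples))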
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
carlvlewis/bokeh | examples/plotting/server/boxplot.py | 42 | 2372 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_server
# Generate some synthetic time series for six different categories
cats = list("abcdef")
data = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
data[g == l] += i // 2
df = pd.DataFrame(dict(score=data, group=g))
# Find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
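# Hedged illustration (numbers invented): if a category had q1 = 1.0 and
# q3 = 3.0, the IQR would be 2.0, so the whiskers could reach at most -2.0 and
# 6.0; points outside that band are drawn as outlier circles further below.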
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting; we need coordinates for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
output_server('boxplot')
p = figure(tools="previewsave", background_fill="#EFE8E2", title="", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# stems
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")
# boxes
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
fill_color="#3B8686", line_width=2, line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# outliers
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.ygrid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
show(p)
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
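# Hedged illustration (not in the original source): for X = [[3., 4.], [0., 5.]]
# row_norms(X) is [5., 5.] and row_norms(X, squared=True) is [25., 25.]; the
# einsum path avoids the X.shape-sized temporary that (X * X).sum(axis=1)
# would allocate.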
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)), but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the fewest errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
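# Hedged illustration (not in the original source): log_logistic(np.zeros((1, 1)))
# is about -0.693 (= log(0.5)); for a large negative x_i the x_i <= 0 branch
# returns roughly x_i itself instead of underflowing to -inf.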
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
        analysis and recommendations, The American Statistician, Vol. 37, No. 3,
        pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
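# Hedged self-check (not part of the library module, runnable e.g. via
# ``python -m sklearn.utils.extmath``): the randomized SVD should approximate
# the leading singular values returned by the exact LAPACK factorization.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    M = rng.randn(200, 50)
    U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=0)
    s_exact = linalg.svd(M, full_matrices=False)[1][:5]
    print(np.round(s, 3))
    print(np.round(s_exact, 3))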
| bsd-3-clause |
gditzler/py-npfs | src/npfs.py | 1 | 6070 | #!/usr/bin/env python
import numpy as np
import feast
from scipy.stats import binom
from multiprocessing import Pool
import matplotlib.pylab as plt
__author__ = "Gregory Ditzler"
__copyright__ = "Copyright 2014, EESI Laboratory (Drexel University)"
__credits__ = ["Gregory Ditzler"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Gregory Ditzler"
__email__ = "[email protected]"
__status__ = "development"
class npfs:
def __init__(self, fs_method="JMI", n_select=5, n_bootstraps=100, \
verbose=False, alpha=.01, beta=0.0, parallel=None, min_improv=0.):
"""
        @self - self explanatory
@fs_method - feature selection algorithm to use. Available methods are:
CIFE, CMIM, CONDMI, CONDRED, DISR, ICAP, JMI, MIM, MIFS, mRMR
DEFAULT: JMI
@n_select - number of features to select. this is the number of
features that the base feature selection uses. NPFS may
select a different number of features [DEFAULT = 5]
@n_bootstraps - number of bootstraps [DEFAULT = 100]
@alpha - size of the hypothesis test [DEFAULT = 0.01]
@beta - bias parameter for the test [DEFAULT = 0.0]
@parallel - number of parallel workers to use [DEFAULT = None]
@min_improv - critera for early stopping [DEFAULT = 0.0]
"""
self.fs_method = fs_method
self.n_select = n_select
self.n_bootstraps = n_bootstraps
self.alpha = alpha
self.beta = beta
self.selected_features = []
self.parallel = parallel
self.min_improv = min_improv
if min_improv != 0.:
self.early_stopping = True
else:
self.early_stopping = False
def fit(self, data, labels):
"""
        @self - self explanatory
@data - data in a numpy array. here are some suggestions for formatting
the data.
len(data) = n_observations
len(data.transpose()) = n_features
@labels - numerical class labels in a numpy array.
len(labels) = n_observations
"""
data, labels = self.__check_data(data, labels)
try:
fs_method = getattr(feast, self.fs_method)
except ImportError:
raise("Method does not exist in FEAST")
self.n_observations = len(data)
self.n_features = len(data.transpose())
self.method = fs_method
# @Z - contains the observations of the Bernoulli random variables
        # that indicate whether each feature was or was not selected
Z = np.zeros( (self.n_features, self.n_bootstraps) )
self.data = data
self.labels = labels
if self.parallel == None:
if self.early_stopping == False:
for b in range(self.n_bootstraps):
sf = self.boot_iteration()
Z[sf, b] = 1 # mark the features selected with a '1'.
else:
p1_old = np.zeros((self.n_features,))
for b in range(self.n_bootstraps):
sf = self.boot_iteration()
Z[sf, b] = 1.
                    p1 = Z.sum(axis=1) / (b + 1.)  # b+1 bootstraps completed; avoids divide-by-zero at b == 0
d = np.abs(p1 - p1_old).mean()
if d < self.min_improv:
self.run_time = b
break
p1_old = p1
else:
pool = Pool(processes = self.parallel)
sfs = pool.map(__call__, (self for x in range(self.n_bootstraps)))
for x in range(len(sfs)):
Z[sfs[x], x] = 1
z = np.sum(Z, axis=1) # z is a binomial random variable
# compute the neyman-pearson threshold (include the bias term)
p = (1.0*self.n_select)/self.n_features + self.beta
        if p > 1.0: # user chose beta poorly -- reject the parameters
raise ValueError("p+beta > 1 -> Invalid probability")
delta = binom.ppf(1 - self.alpha, self.n_bootstraps, p)
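        # Hedged illustration (not in the original source): with, say, 50
        # features, n_select=5 and n_bootstraps=100, p is roughly 0.1, so a
        # feature must be selected in noticeably more than the expected ~10
        # bootstraps before z[k] > delta declares it relevant at size alpha.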
# based on the threshold, determine which features are relevant and return
# them in a numpy array
selected_features = []
for k in range(self.n_features):
if z[k] > delta:
selected_features.append(k)
self.Bernoulli_matrix = Z
self.selected_features = np.array(selected_features)
return self.selected_features
def __check_data(self, data, labels):
"""
The data and label arrays must be of the same length. Furthermore,
the data are expected to be in numpy arrays. Return an error if this
is not the case. Otherwise, if everything else check out, cast the
arrays as floats. Its how the data are expected for PyFeast.
"""
if isinstance(data, np.ndarray) is False:
raise Exception("Data must be an numpy ndarray.")
if isinstance(labels, np.ndarray) is False:
raise Exception("Labels must be an numpy ndarray.")
if len(data) != len(labels):
raise Exception("Data and labels must be the same length")
return 1.0*data, 1.0*labels
def boot_iteration(self, null=None):
"""
@self
@null - leave alone
"""
        # generate some random integers that are the bootstrap indices. the size
# of the bootstrap is the size of the data sample. hence all samples are
# sampled with replacement
idx = np.random.randint(0, self.n_observations, self.n_observations)
data_sub = self.data[idx] # bootstrap features
labels_sub = self.labels[idx] # bootstrap labels
sf = self.method(data_sub, labels_sub, self.n_select) # run feature selection
return sf
def plot_bernoulli_matrix(self, show_npfs=False):
"""
Plot the heatmap of the Bernoulli matrix
@self
@show_npfs - Highlight NPFS detections [Boolean]
"""
matrix = self.Bernoulli_matrix
if show_npfs == False:
plot = plt.imshow(matrix)
plot.set_cmap('hot')
plt.colorbar()
plt.xlabel("Bootstraps")
plt.ylabel("Feature")
plt.show()
else:
for i in self.selected_features:
for k in range(len(matrix[i])):
matrix[i,k] = .5
plot = plt.imshow(matrix)
plot.set_cmap('hot')
plt.xlabel("Bootstraps")
plt.ylabel("Feature")
plt.colorbar()
plt.show()
return None
def __call__(obj):
"""
This is a weird little hack to get around using multiprocessing with
the package being called inside of the NPFS object
"""
return obj.boot_iteration(None)
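# Hedged usage sketch: the class is assumed to be named NPFS and the keyword
# arguments are inferred from the attributes referenced above, so the real
# constructor signature may differ. 'JMI' is one of the selectors exposed by
# PyFeast.
#
#   selector = NPFS(fs_method='JMI', n_select=10, n_bootstraps=100, alpha=0.01)
#   kept = selector.fit(data, labels)   # numpy array of selected feature indices
#   selector.plot_bernoulli_matrix(show_npfs=True)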
| gpl-3.0 |
fidelram/deepTools | deeptools/bamPEFragmentSize.py | 1 | 20969 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
import matplotlib.pyplot as plt
import plotly.offline as py
import plotly.graph_objs as go
# own tools
from deeptools.parserCommon import writableFile
from deeptools.getFragmentAndReadSize import get_read_and_fragment_length
from deeptools._version import __version__
def parse_arguments():
parser = argparse.ArgumentParser(
        description='This tool calculates the fragment sizes for read pairs given a BAM file from paired-end sequencing. '
'Several regions are sampled depending on the '
        'size of the genome and number of processors to estimate the '
'summary statistics on the fragment lengths. '
'Properly paired reads are preferred for computation, i.e., '
'it will only use discordant pairs if no concordant alignments '
'overlap with a given region. '
'The default setting simply prints the summary statistics to the screen.')
parser.add_argument('--bamfiles', '-b',
help='List of BAM files to process',
nargs='+',
metavar='bam files')
parser.add_argument('--histogram', '-hist', '-o',
help='Save a .png file with a histogram '
'of the fragment length distribution.',
metavar='FILE')
parser.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf, svg and plotly.',
default=None,
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
parser.add_argument('--numberOfProcessors', '-p',
help='Number of processors to use. The default is '
'to use 1. (Default: %(default)s)',
metavar="INT",
type=int,
default=1,
required=False)
parser.add_argument('--samplesLabel',
help='Labels for the samples plotted. The '
'default is to use the file name of the '
'sample. The sample labels should be separated '
                        'by spaces and quoted if a label itself '
                        'contains a space. E.g. --samplesLabel label-1 "label 2" ',
nargs='+')
parser.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
parser.add_argument('--maxFragmentLength',
help='The maximum fragment length in the histogram. A value of 0 (the default) indicates to use twice the mean fragment length. (Default: %(default)s)',
default=0,
type=int)
parser.add_argument('--logScale',
help='Plot on the log scale',
action='store_true')
parser.add_argument('--binSize', '-bs',
metavar='INT',
help='Length in bases of the window used to sample the genome. (Default: %(default)s)',
default=1000,
type=int)
parser.add_argument('--distanceBetweenBins', '-n',
metavar='INT',
help='To reduce the computation time, not every possible genomic '
'bin is sampled. This option allows you to set the distance '
'between bins actually sampled from. Larger numbers are sufficient '
'for high coverage samples, while smaller values are useful for '
'lower coverage samples. Note that if you specify a value that '
'results in too few (<1000) reads sampled, the value will be '
'decreased. (Default: %(default)s)',
default=1000000,
type=int)
parser.add_argument('--blackListFileName', '-bl',
help="A BED file containing regions that should be excluded from all analyses. Currently this works by rejecting genomic chunks that happen to overlap an entry. Consequently, for BAM files, if a read partially overlaps a blacklisted region or a fragment spans over it, then the read/fragment might still be considered.",
metavar="BED file",
required=False)
parser.add_argument('--table',
metavar='FILE',
help='In addition to printing read and fragment length metrics to the screen, write them to the given file in tabular format.',
required=False)
parser.add_argument('--outRawFragmentLengths',
metavar='FILE',
required=False,
type=writableFile,
help='Save the fragment (or read if the input is single-end) length and their associated number of occurrences to a tab-separated file. Columns are length, number of occurrences, and the sample label.')
parser.add_argument('--verbose',
help='Set if processing data messages are wanted.',
action='store_true',
required=False)
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
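# Hedged example invocation, assembled only from the options defined above
# (file names and labels are placeholders):
#
#   bamPEFragmentSize -b rep1.bam rep2.bam -hist fragmentSizes.png \
#       --samplesLabel "rep 1" "rep 2" -T "Fragment sizes" --maxFragmentLength 1000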
def getDensity(lengths, minVal, maxVal):
"""
This is essentially computing what hist() in matplotlib is doing and returning the results.
This then allows us to free up the memory consumed by each sample rather than returning it all back to main() for plotting.
"""
n, bins, patches = plt.hist(lengths, bins=100, range=(minVal, maxVal), normed=True)
plt.clf()
return (n, bins)
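# Hedged illustration: getDensity(np.array([150., 180., 180., 250.]), 100, 300)
# returns the same (densities, bin edges) pair that plt.hist(..., bins=100,
# range=(100, 300), normed=True) would produce, while letting the caller free
# the raw length list immediately afterwards.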
def getFragSize(bam, args, idx, outRawFrags):
fragment_len_dict, read_len_dict = get_read_and_fragment_length(bam, return_lengths=True,
blackListFileName=args.blackListFileName,
numberOfProcessors=args.numberOfProcessors,
verbose=args.verbose,
binSize=args.binSize,
distanceBetweenBins=args.distanceBetweenBins)
if outRawFrags:
label = bam
if args.samplesLabel and idx < len(args.samplesLabel):
label = args.samplesLabel[idx]
if fragment_len_dict:
fragment_len_dict['lengths'] = [int(x) for x in fragment_len_dict['lengths']]
cnts = np.bincount(fragment_len_dict['lengths'], minlength=int(fragment_len_dict['max']) + 1)
else:
read_len_dict['lengths'] = [int(x) for x in read_len_dict['lengths']]
cnts = np.bincount(read_len_dict['lengths'], minlength=int(read_len_dict['max']) + 1)
        for i, v in enumerate(cnts):  # use a new name so the sample index `idx` is not clobbered
            if v > 0:
                outRawFrags.write("{}\t{}\t{}\n".format(i, v, label))
if args.samplesLabel and idx < len(args.samplesLabel):
print("\n\nSample label: {}".format(args.samplesLabel[idx]))
else:
print("\n\nBAM file : {}".format(bam))
if fragment_len_dict:
if fragment_len_dict['mean'] == 0:
print("No pairs were found. Is the data from a paired-end sequencing experiment?")
print("Sample size: {}\n".format(fragment_len_dict['sample_size']))
print("Fragment lengths:")
print("Min.: {}\n1st Qu.: {}\nMean: {}\nMedian: {}\n"
"3rd Qu.: {}\nMax.: {}\nStd: {}".format(fragment_len_dict['min'],
fragment_len_dict['qtile25'],
fragment_len_dict['mean'],
fragment_len_dict['median'],
fragment_len_dict['qtile75'],
fragment_len_dict['max'],
fragment_len_dict['std']))
print("MAD: {}\nLen. 10%: {}\nLen. 20%: {}\nLen. 30%: {}\nLen. 40%: {}\nLen. 60%: {}\nLen. 70%: {}\nLen. 80%: {}\nLen. 90%: {}\nLen. 99%: {}\n".format(fragment_len_dict['mad'],
fragment_len_dict['qtile10'],
fragment_len_dict['qtile20'],
fragment_len_dict['qtile30'],
fragment_len_dict['qtile40'],
fragment_len_dict['qtile60'],
fragment_len_dict['qtile70'],
fragment_len_dict['qtile80'],
fragment_len_dict['qtile90'],
fragment_len_dict['qtile99']))
else:
print("No pairs were found. Is the data from a paired-end sequencing experiment?")
print("\nRead lengths:")
print("Sample size: {}\n".format(read_len_dict['sample_size']))
print("Min.: {}\n1st Qu.: {}\nMean: {}\nMedian: {}\n"
"3rd Qu.: {}\nMax.: {}\nStd: {}".format(read_len_dict['min'],
read_len_dict['qtile25'],
read_len_dict['mean'],
read_len_dict['median'],
read_len_dict['qtile75'],
read_len_dict['max'],
read_len_dict['std']))
print("MAD: {}\nLen. 10%: {}\nLen. 20%: {}\nLen. 30%: {}\nLen. 40%: {}\nLen. 60%: {}\nLen. 70%: {}\nLen. 80%: {}\nLen. 90%: {}\nLen. 99%: {}\n".format(read_len_dict['mad'],
read_len_dict['qtile10'],
read_len_dict['qtile20'],
read_len_dict['qtile30'],
read_len_dict['qtile40'],
read_len_dict['qtile60'],
read_len_dict['qtile70'],
read_len_dict['qtile80'],
read_len_dict['qtile90'],
read_len_dict['qtile99']))
# The read and fragment lists will just eat up memory if not removed!
if args.histogram:
if fragment_len_dict:
maxVal = fragment_len_dict['mean'] * 2
minVal = fragment_len_dict['min']
else:
maxVal = read_len_dict['mean'] * 2
minVal = read_len_dict['min']
if args.maxFragmentLength > 0:
maxVal = args.maxFragmentLength
if fragment_len_dict:
fragment_len_dict['lengths'] = getDensity(fragment_len_dict['lengths'], minVal, maxVal)
if read_len_dict:
read_len_dict['lengths'] = getDensity(read_len_dict['lengths'], minVal, maxVal)
else:
if fragment_len_dict:
del fragment_len_dict['lengths']
if read_len_dict:
del read_len_dict['lengths']
return (fragment_len_dict, read_len_dict)
def printTable(args, fragDict, readDict):
"""
Print the read and fragment dictionary in more easily parsable tabular format to a file.
"""
of = open(args.table, "w")
of.write("\tFrag. Sampled")
of.write("\tFrag. Len. Min.\tFrag. Len. 1st. Qu.\tFrag. Len. Mean\tFrag. Len. Median\tFrag. Len. 3rd Qu.\tFrag. Len. Max\tFrag. Len. Std.")
of.write("\tFrag. Med. Abs. Dev.\tFrag. Len. 10%\tFrag. Len. 20%\tFrag. Len. 30%\tFrag. Len. 40%\tFrag. Len. 60%\tFrag. Len. 70%\tFrag. Len. 80%\tFrag. Len. 90%\tFrag. Len. 99%")
of.write("\tReads Sampled")
of.write("\tRead Len. Min.\tRead Len. 1st. Qu.\tRead Len. Mean\tRead Len. Median\tRead Len. 3rd Qu.\tRead Len. Max\tRead Len. Std.")
of.write("\tRead Med. Abs. Dev.\tRead Len. 10%\tRead Len. 20%\tRead Len. 30%\tRead Len. 40%\tRead Len. 60%\tRead Len. 70%\tRead Len. 80%\tRead Len. 90%\tRead Len. 99%\n")
for idx, bam in enumerate(args.bamfiles):
if args.samplesLabel and idx < len(args.samplesLabel):
of.write(args.samplesLabel[idx])
else:
of.write(bam)
if fragDict is not None and fragDict[bam] is not None:
d = fragDict[bam]
of.write("\t{}".format(d['sample_size']))
of.write("\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(d['min'],
d['qtile25'],
d['mean'],
d['median'],
d['qtile75'],
d['max'],
d['std']))
of.write("\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(d['mad'],
d['qtile10'],
d['qtile20'],
d['qtile30'],
d['qtile40'],
d['qtile60'],
d['qtile70'],
d['qtile80'],
d['qtile90'],
d['qtile99']))
else:
of.write("\t0")
of.write("\t0\t0\t0\t0\t0\t0\t0")
of.write("\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0")
d = readDict[bam]
of.write("\t{}".format(d['sample_size']))
of.write("\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(d['min'],
d['qtile25'],
d['mean'],
d['median'],
d['qtile75'],
d['max'],
d['std']))
of.write("\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(d['mad'],
d['qtile10'],
d['qtile20'],
d['qtile30'],
d['qtile40'],
d['qtile60'],
d['qtile70'],
d['qtile80'],
d['qtile90'],
d['qtile99']))
of.close()
def main(args=None):
args = parse_arguments().parse_args(args)
fraglengths = {}
readlengths = {}
of = None
if args.outRawFragmentLengths is not None:
of = open(args.outRawFragmentLengths, "w")
of.write("#bamPEFragmentSize\nSize\tOccurrences\tSample\n")
for idx, bam in enumerate(args.bamfiles):
f, r = getFragSize(bam, args, idx, of)
fraglengths[bam] = f
readlengths[bam] = r
if args.table is not None:
printTable(args, fraglengths, readlengths)
if args.histogram:
if args.samplesLabel:
if len(args.bamfiles) != len(args.samplesLabel):
sys.exit("The number of labels does not match the number of BAM files.")
else:
labels = args.samplesLabel
else:
labels = list(fraglengths.keys())
i = 0
data = []
for bam in fraglengths.keys():
d = fraglengths[bam]
if d is None:
d = readlengths[bam]
if args.maxFragmentLength > 0:
maxVal = args.maxFragmentLength
else:
maxVal = d['mean'] * 2
if args.plotFileFormat == 'plotly':
trace = go.Histogram(x=d['lengths'],
histnorm='probability',
opacity=0.5,
name=labels[i],
nbinsx=100,
xbins=dict(start=d['min'], end=maxVal))
data.append(trace)
else:
plt.bar(d['lengths'][1][:-1], height=d['lengths'][0],
width=d['lengths'][1][1:] - d['lengths'][1][:-1],
align='edge', log=args.logScale,
alpha=0.5, label=labels[i])
i += 1
if args.plotFileFormat == 'plotly':
fig = go.Figure()
fig['data'] = data
fig['layout']['yaxis1'].update(title='Frequency')
fig['layout']['xaxis1'].update(title='Fragment Length')
fig['layout'].update(title=args.plotTitle)
fig['layout'].update(showlegend=True)
if args.logScale:
fig['layout']['yaxis1'].update(type='log')
py.plot(fig, filename=args.histogram, auto_open=False)
else:
plt.xlabel('Fragment Length')
plt.ylabel('Frequency')
plt.legend(loc='upper right')
plt.title(args.plotTitle)
plt.savefig(args.histogram, bbox_inches=0, format=args.plotFileFormat)
plt.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
gully/Starfish | Starfish/model.py | 2 | 4803 | import numpy as np
import Starfish
from . import constants as C
from .grid_tools import Interpolator
import json
import h5py
import logging
import matplotlib.pyplot as plt
from itertools import zip_longest
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
class ThetaParam:
'''
An object holding the collection of parameters shared between all orders.
:param grid: parameters corresponding to the dimensions of the grid.
:type grid: 1D np.array
'''
def __init__(self, grid, vz=0.0, vsini=0.0, logOmega=0.0, Av=0.0):
self.grid = grid
self.vz = vz
self.vsini = vsini
self.logOmega = logOmega #log10Omega
self.Av = Av
def save(self, fname="theta.json"):
'''
Save the parameters to a JSON file
'''
f = open(fname, 'w')
json.dump(self, f, cls=ThetaEncoder, indent=2, sort_keys=True)
f.close()
@classmethod
def from_dict(cls, d):
'''
Load the parameters from a dictionary, e.g., from the config file
:param d: dictionary of parameters
:type d: dictionary
'''
d["grid"] = np.array(d["grid"])
return cls(**d)
@classmethod
def load(cls, fname="theta.json"):
'''
Load the parameters from a JSON file
'''
f = open(fname, "r")
read = json.load(f) # read is a dictionary
f.close()
read["grid"] = np.array(read["grid"])
return cls(**read)
def __repr__(self):
return "grid:{} vz:{} vsini:{} logOmega:{} Av:{}".format(self.grid, self.vz, self.vsini, self.logOmega, self.Av)
class ThetaEncoder(json.JSONEncoder):
'''
Serialize an instance of o=ThetaParam() to JSON
'''
def default(self, o):
try:
mydict = {"grid":o.grid.tolist(),
"vz":o.vz,
"vsini":o.vsini,
"logOmega":o.logOmega,
"Av":o.Av}
except TypeError:
pass
else:
return mydict
# Let the base class default method raise the TypeError, if there is one
return json.JSONEncoder.default(self, o)
class PhiParam:
'''
An object holding the collection of parameters specific to a single order.
'''
def __init__(self, spectrum_id, order, fix_c0=False, cheb=np.zeros((Starfish.config["cheb_degree"],)),
sigAmp=Starfish.config["Phi"]["sigAmp"], logAmp=Starfish.config["Phi"]["logAmp"], l=Starfish.config["Phi"]["l"], regions=None):
self.spectrum_id = spectrum_id
self.order = order
self.fix_c0 = fix_c0
self.cheb = cheb
self.sigAmp = sigAmp
self.logAmp = logAmp
self.l = l
self.regions = regions
def toarray(self):
'''
Return parameters formatted as a numpy array.
'''
p = self.cheb.tolist() + [self.sigAmp, self.logAmp, self.l]
if self.regions is not None:
p += self.regions.flatten().tolist()
return np.array(p)
def save(self, fname="phi.json"):
f = open(Starfish.specfmt.format(self.spectrum_id, self.order) + fname, 'w')
json.dump(self, f, cls=PhiEncoder, indent=2, sort_keys=True)
f.close()
@classmethod
def load(cls, fname):
'''
Load the parameters from a JSON file
'''
f = open(fname, "r")
read = json.load(f) # read is a dictionary
f.close()
read["cheb"] = np.array(read["cheb"])
# Try to read regions
if "regions" in read:
read["regions"] = np.array(read["regions"])
else:
read["regions"] = None
return cls(**read)
def __repr__(self):
return "spectrum_id:{} order:{} fix_c0:{} cheb:{} sigAmp:{} logAmp:{} l:{} regions:{}".format(self.spectrum_id, self.order, self.fix_c0, self.cheb, self.sigAmp, self.logAmp, self.l, self.regions)
class PhiEncoder(json.JSONEncoder):
'''
Serialize an instance of o=PhiParam() to JSON
'''
def default(self, o):
try:
mydict = {"spectrum_id":o.spectrum_id,
"order": o.order,
"fix_c0": o.fix_c0,
"cheb": o.cheb.tolist(),
"sigAmp":o.sigAmp,
"logAmp":o.logAmp,
"l":o.l}
if o.regions is not None:
mydict["regions"] = o.regions.tolist()
except TypeError:
pass
else:
return mydict
# Let the base class default method raise the TypeError, if there is one
return json.JSONEncoder.default(self, o)
| bsd-3-clause |
matsumishoki/machine_learning | digits_k-nn.py | 1 | 2503 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 30 14:52:14 2016
@author: matsumi
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KNeighborsClassifier
class K_NN(object):
def __init__(self, learning_rate=0.5, max_iteration=100):
self.max_iteration = max_iteration
def fit(self, X, y):
distance_history = []
distance_best = 1000
distance_best_num = []
x_i_best_num = []
# num_classes = len(np.unique(y))
# count = 1
num_samples = len(X)
input_data_number = np.random.choice(num_samples)
for n in range(num_samples):
distance = np.sum((X[input_data_number, :] - X[n, :])**2)
# print "distance", distance
distance_history.append(distance)
# print "distance_history", distance_history
k = np.sort(distance_history)
print "k", k
if distance < distance_best and distance != 0:
distance_best = distance
distance_best_num.append(distance)
x_i_best_num.append(n)
print "distance_best", distance_best
print "x_i_best_num:", x_i_best_num
print "distance_best_num:", distance_best_num
print "input_data_number:", input_data_number
print "distance_best:", distance_best
# for (x_i, y_i) in zip(X, y):
# print "count", count
# print "x_i", x_i
# print "y_i", y_i
# count = count + 1
# if count ==360:
# plt.matshow(x_i.reshape(8, 8), cmap=plt.cm.gray)
def Kneighbors(self, X, n_neighbors=None, return_distance=True):
pass
def Kneighbors_graph(self, X=None, n_neighbors=None):
pass
def predict(self, X):
pass
def predict_proba(self, X):
pass
def score(self, X, y):
predict_y = self.predict(X)
correct_rate = np.mean(predict_y == y)
return correct_rate
if __name__ == '__main__':
digits = load_digits(2)
X = digits.data
    num_samples = len(X)
T = digits.target
    # When not using the library (scratch implementation)
classifier = K_NN()
classifier.fit(X, T)
    # When using the library (scikit-learn)
lib_classifier = KNeighborsClassifier()
lib_classifier.fit(X, T)
lib_y = lib_classifier.predict(X)
print "lib_y:", lib_y
print "lib_classifier_accuracy:", lib_classifier.score(X, T)
| mit |
kayak/fireant | fireant/tests/widgets/test_pandas.py | 2 | 30582 | import copy
from functools import partial
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.testing
from pypika import Table
from pypika.analytics import Sum
from fireant import DataSet, DataType, Field, Rollup
from fireant.tests.dataset.mocks import (
CumSum,
ElectionOverElection,
dimx0_metricx1_df,
dimx0_metricx2_df,
dimx1_date_df,
dimx1_date_operation_df,
dimx1_num_df,
dimx1_str_df,
dimx2_date_str_df,
dimx2_date_str_ref_df,
mock_dataset,
no_index_df,
test_database,
)
from fireant.utils import alias_selector as f
from fireant.widgets.pandas import Pandas
def format_float(x, is_raw=False):
if pd.isnull(x):
return ''
if x in [np.inf, -np.inf]:
return 'Inf'
return f'{x:.0f}' if is_raw else f'{x:,.0f}'
format_float_raw = partial(format_float, is_raw=True)
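# Hedged examples of the formatter above: format_float(np.nan) == '',
# format_float(np.inf) == 'Inf', format_float(1234.56) == '1,235' and
# format_float_raw(1234.56) == '1235'.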
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
class PandasTransformerTests(TestCase):
maxDiff = None
def test_metricx1(self):
result = Pandas(mock_dataset.fields.votes).transform(dimx0_metricx1_df, [], [])
expected = dimx0_metricx1_df.copy()[[f('votes')]]
expected.columns = ['Votes']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2(self):
result = Pandas(mock_dataset.fields.votes, mock_dataset.fields.wins).transform(dimx0_metricx2_df, [], [])
expected = dimx0_metricx2_df.copy()[[f('votes'), f('wins')]]
expected.columns = ['Votes', 'Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_reversed(self):
result = Pandas(mock_dataset.fields.wins, mock_dataset.fields.votes).transform(dimx0_metricx2_df, [], [])
expected = dimx0_metricx2_df.copy()[[f('wins'), f('votes')]]
expected.columns = ['Wins', 'Votes']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx1_date(self):
result = Pandas(mock_dataset.fields.wins).transform(dimx1_date_df, [mock_dataset.fields.timestamp], [])
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx1_date_with_operation(self):
result = Pandas(CumSum(mock_dataset.fields.votes)).transform(
dimx1_date_operation_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_operation_df.copy()[[f('cumsum(votes)')]]
expected.index.names = ['Timestamp']
expected.columns = ['CumSum(Votes)']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx1_str(self):
result = Pandas(mock_dataset.fields.wins).transform(dimx1_str_df, [mock_dataset.fields.political_party], [])
expected = dimx1_str_df.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx1_int(self):
result = Pandas(mock_dataset.fields.wins).transform(dimx1_num_df, [mock_dataset.fields['candidate-id']], [])
expected = dimx1_num_df.copy()[[f('wins')]]
expected.index.names = ['Candidate ID']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
result = Pandas(mock_dataset.fields.wins).transform(dimx2_date_str_df, dimensions, [])
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_transpose_dimx2_str(self):
result = Pandas(mock_dataset.fields.wins, transpose=True).transform(
dimx1_str_df, [mock_dataset.fields.political_party], []
)
expected = dimx1_str_df.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.transpose()
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx1_str_transposes_data_frame(self):
result = Pandas(mock_dataset.fields.wins, pivot=[mock_dataset.fields.political_party]).transform(
dimx1_str_df, [mock_dataset.fields.political_party], []
)
expected = dimx1_str_df.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.transpose()
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str(self):
result = Pandas(mock_dataset.fields.wins, pivot=[mock_dataset.fields.political_party]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_hidden_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
result = Pandas(mock_dataset.fields.wins, hide=[mock_dataset.fields.political_party]).transform(
dimx2_date_str_df, dimensions, []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.reset_index('$political_party', inplace=True, drop=True)
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_hidden_metric_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = Pandas(mock_dataset.fields.votes, hide=[mock_dataset.fields.votes]).transform(
dimx2_date_str_ref_df, dimensions, references
)
expected = dimx2_date_str_ref_df.copy()[[f('votes_eoe')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Votes EoE']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_hidden_ref_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = Pandas(mock_dataset.fields.votes, hide=['votes_eoe']).transform(
dimx2_date_str_ref_df, dimensions, references
)
expected = dimx2_date_str_ref_df.copy()[[f('votes')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Votes']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_fetch_only_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
dimensions[1].fetch_only = True
result = Pandas(mock_dataset.fields.wins).transform(dimx2_date_str_df, dimensions, [])
dimensions[1].fetch_only = False
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.reset_index('$political_party', inplace=True, drop=True)
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_time_series_ref(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = Pandas(mock_dataset.fields.votes).transform(dimx2_date_str_ref_df, dimensions, references)
expected = dimx2_date_str_ref_df.copy()[[f('votes'), f('votes_eoe')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Votes', 'Votes EoE']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metric_format(self):
import copy
votes = copy.copy(mock_dataset.fields.votes)
votes.prefix = '$'
votes.suffix = '€'
votes.precision = 2
# divide the data frame by 3 to get a repeating decimal so we can check precision
result = Pandas(votes).transform(dimx1_date_df / 3, [mock_dataset.fields.timestamp], [])
f_votes = f('votes')
expected = dimx1_date_df.copy()[[f_votes]]
expected[f_votes] = ['${0:,.2f}€'.format(x) for x in expected[f_votes] / 3]
expected.index.names = ['Timestamp']
expected.columns = ['Votes']
expected.columns.name = 'Metrics'
pandas.testing.assert_frame_equal(expected, result)
def test_nan_in_metrics(self):
cat_dim_df_with_nan = dimx1_str_df.copy()
cat_dim_df_with_nan['$wins'] = cat_dim_df_with_nan['$wins'].apply(float)
cat_dim_df_with_nan.iloc[2, 1] = np.nan
result = Pandas(mock_dataset.fields.wins).transform(
cat_dim_df_with_nan, [mock_dataset.fields.political_party], []
)
expected = cat_dim_df_with_nan.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_inf_in_metrics(self):
cat_dim_df_with_nan = dimx1_str_df.copy()
cat_dim_df_with_nan['$wins'] = cat_dim_df_with_nan['$wins'].apply(float)
cat_dim_df_with_nan.iloc[2, 1] = np.inf
result = Pandas(mock_dataset.fields.wins).transform(
cat_dim_df_with_nan, [mock_dataset.fields.political_party], []
)
expected = cat_dim_df_with_nan.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_neginf_in_metrics(self):
cat_dim_df_with_nan = dimx1_str_df.copy()
cat_dim_df_with_nan['$wins'] = cat_dim_df_with_nan['$wins'].apply(float)
        cat_dim_df_with_nan.iloc[2, 1] = -np.inf
result = Pandas(mock_dataset.fields.wins).transform(
cat_dim_df_with_nan, [mock_dataset.fields.political_party], []
)
expected = cat_dim_df_with_nan.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_inf_in_metrics_with_precision_zero(self):
cat_dim_df_with_nan = dimx1_str_df.copy()
cat_dim_df_with_nan['$wins'] = cat_dim_df_with_nan['$wins'].apply(float)
cat_dim_df_with_nan.iloc[2, 1] = np.inf
mock_modified_dataset = copy.deepcopy(mock_dataset)
mock_modified_dataset.fields.wins.precision = 0
result = Pandas(mock_modified_dataset.fields.wins).transform(
cat_dim_df_with_nan, [mock_modified_dataset.fields.political_party], []
)
expected = cat_dim_df_with_nan.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected['$wins'] = ['6', '0', 'Inf']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
pandas.testing.assert_frame_equal(expected, result)
class PandasTransformerSortTests(TestCase):
def test_metricx2_sort_index_asc(self):
result = Pandas(mock_dataset.fields.wins, sort=[0]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.sort_index()
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_sort_index_desc(self):
result = Pandas(mock_dataset.fields.wins, sort=[0], ascending=[False]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.sort_index(ascending=False)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_sort_value_asc(self):
result = Pandas(mock_dataset.fields.wins, sort=[1]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.sort_values(['Wins'])
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_sort_value_desc(self):
result = Pandas(mock_dataset.fields.wins, sort=[1], ascending=[False]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.sort_values(['Wins'], ascending=False)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_sort_index_and_value(self):
result = Pandas(mock_dataset.fields.wins, sort=[-0, 1]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = (
expected.reset_index().sort_values(['Timestamp', 'Wins'], ascending=[True, False]).set_index('Timestamp')
)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_index_asc(self):
result = Pandas(mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.sort_index()
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_index_desc(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0], ascending=[False]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.sort_index(ascending=False)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_first_metric_asc(self):
result = Pandas(mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[1]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Democrat']).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_metric_desc(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[1], ascending=[False]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Democrat'], ascending=False).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_metric_asc(self):
result = Pandas(mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[1]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Democrat'], ascending=True).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx1_metricx2(self):
result = Pandas(
mock_dataset.fields.votes, mock_dataset.fields.wins, pivot=[mock_dataset.fields.timestamp]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes'), f('wins')]]
expected = expected.unstack(level=0)
expected.index.names = ['Party']
expected.columns = pd.MultiIndex.from_product(
[
['Votes', 'Wins'],
pd.DatetimeIndex(['1996-01-01', '2000-01-01', '2004-01-01', '2008-01-01', '2012-01-01', '2016-01-01']),
],
names=['Metrics', 'Timestamp'],
)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_second_metric_desc(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=1, ascending=False
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Democrat'], ascending=False).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_index_and_columns(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0, 2], ascending=[True, False]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = (
expected.reset_index()
.sort_values(['Timestamp', 'Democrat'], ascending=[True, False])
.set_index('Timestamp')
)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_use_first_value_for_ascending_when_arg_has_invalid_length(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0, 2], ascending=[True]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Timestamp', 'Democrat'], ascending=True).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_use_pandas_default_for_ascending_when_arg_empty_list(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0, 2], ascending=[]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Timestamp', 'Democrat'], ascending=None).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx2_date_str_sort_index_level_0_default_ascending(self):
result = Pandas(mock_dataset.fields.wins, sort=[0]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.reset_index().sort_values(['Timestamp']).set_index(['Timestamp', 'Party'])
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx2_date_str_sort_index_level_0_asc(self):
result = Pandas(mock_dataset.fields.wins, sort=[0], ascending=True).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.reset_index().sort_values(['Timestamp'], ascending=True).set_index(['Timestamp', 'Party'])
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_sort_index_level_1_desc(self):
result = Pandas(mock_dataset.fields.wins, sort=[1], ascending=[False]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.reset_index().sort_values(['Party'], ascending=[False]).set_index(['Timestamp', 'Party'])
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_sort_index_and_values(self):
result = Pandas(mock_dataset.fields.wins, sort=[0, 2], ascending=[False, True]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = (
expected.reset_index()
.sort_values(['Timestamp', 'Wins'], ascending=[False, True])
.set_index(['Timestamp', 'Party'])
)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_empty_sort_array_is_ignored(self):
result = Pandas(mock_dataset.fields.wins, sort=[]).transform(dimx1_date_df, [mock_dataset.fields.timestamp], [])
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_sort_value_greater_than_number_of_columns_is_ignored(self):
result = Pandas(mock_dataset.fields.wins, sort=[5]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_sort_with_no_index(self):
result = Pandas(mock_dataset.fields.wins, sort=[0]).transform(no_index_df, [mock_dataset.fields.timestamp], [])
expected = no_index_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_df_transformation_formats_totals_correctly(self):
test_table = Table('test')
ds = DataSet(
table=test_table,
database=test_database,
fields=[
Field('date', label='Date', definition=test_table.date, data_type=DataType.date),
Field('locale', label='Locale', definition=test_table.locale, data_type=DataType.text),
Field('company', label='Company', definition=test_table.text, data_type=DataType.text),
Field('metric1', label='Metric1', definition=Sum(test_table.number), data_type=DataType.number),
Field('metric2', label='Metric2', definition=Sum(test_table.number), data_type=DataType.number),
],
)
df = pd.DataFrame.from_dict(
{
'$metric1': {('~~totals', '~~totals'): 3, ('za', '~~totals'): 3, ('za', 'C1'): 2, ('za', 'C2'): 1},
'$metric2': {('~~totals', '~~totals'): 4, ('za', '~~totals'): 4, ('za', 'C1'): 2, ('za', 'C2'): 2},
}
)
df.index.names = [f(ds.fields.locale.alias), f(ds.fields.company.alias)]
result = Pandas(ds.fields.metric1, ds.fields.metric2, pivot=[ds.fields.company]).transform(
df, [Rollup(ds.fields.locale), Rollup(ds.fields.company)], [], use_raw_values=True
)
self.assertEqual(['Metrics', 'Company'], list(result.columns.names))
self.assertEqual(
[
('Metric1', 'C1'),
('Metric1', 'C2'),
('Metric1', 'Totals'),
('Metric2', 'C1'),
('Metric2', 'C2'),
('Metric2', 'Totals'),
],
result.columns.values.tolist(),
)
self.assertEqual(['Locale'], list(result.index.names))
self.assertEqual(['za', 'Totals'], result.index.values.tolist())
self.assertEqual([['2', '1', '3', '2', '2', '4'], ['', '', '3', '', '', '4']], result.values.tolist())
| apache-2.0 |
JT5D/scikit-learn | sklearn/tests/test_kernel_approximation.py | 6 | 5945 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
"""test that AdditiveChi2Sampler approximates kernel on random data"""
# compute exact kernel
    # abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
def test_skewed_chi2_sampler():
"""test that RBFSampler approximates kernel on random data"""
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
"""test that RBFSampler approximates kernel on random data"""
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
def test_input_validation():
"""Regression test: kernel approx. transformers should work on lists
No assertions; the old versions would simply crash
"""
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_poly_kernel_params():
"""Non-regression: Nystroem should pass other parameters beside gamma."""
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
"""Test Nystroem on a callable."""
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
HopkinsIDD/EpiForecastStatMech | epi_forecast_stat_mech/evaluation/plot_predictions.py | 1 | 10761 | # Lint as: python3
"""Plot helpers for predictions from high_level.Estimator.
"""
from epi_forecast_stat_mech.evaluation.plot_constants import model_colors
from epi_forecast_stat_mech.evaluation.plot_constants import model_types
from matplotlib import pyplot as plt
import numpy as np
def plot_rollout_samples(predictions, model_to_plot, location_to_plot):
"""Helper function to plot the mean predicted value and all rollout samples.
A helper function that plots the mean predicted number of new infections
as a function of time, as well as plotting all of the different rollouts.
Args:
predictions: an xr.DataArray representing predicted new_infections with
dimensions of (location, time, sample, model). Where model is the
Estimator used to generate the predicted new_infections.
model_to_plot: The coordinate name of the model that you want to plot.
location_to_plot: The coordinate integer of the location that you want to
plot.
Returns:
None
"""
pred = predictions.sel(model=model_to_plot).dropna('time')
mean = pred.isel(
location=location_to_plot).mean('sample').rename('infection_mean')
color_params = model_colors(model_to_plot)
# is there a better way to do this?
for i in pred.sample:
plt.plot(
pred.time,
pred.isel({
'location': location_to_plot,
'sample': i
}),
alpha=0.1,
label='_nolegend_',
**color_params)
plt.plot(pred.time, mean, markersize=2, label=model_to_plot, **color_params)
return None
def plot_std_dev(predictions, model_to_plot, location_to_plot, num_stddevs=3):
"""Helper function to plot the mean predicted value and shade the error.
A helper function that plots the mean predicted number of new infections
as a function of time, as well as shades the area +/- num_stddevs around the
mean.
Args:
predictions: an xr.DataArray representing predicted new_infections with
dimensions of (location, time, sample, model). Where model is the
Estimator used to generate the predicted new_infections.
model_to_plot: The coordinate name of the model that you want to plot.
location_to_plot: The coordinate integer of the location that you want to
plot.
num_stddevs: an int representing the number of standard deviations to shade.
Defaults to 3.
Returns:
None
"""
pred = predictions.sel({
'model': model_to_plot
}, drop=True).isel(
location=location_to_plot, drop=True).dropna('time')
mean = pred.mean('sample')
stddev = pred.std('sample')
upper = mean + num_stddevs * stddev
lower = mean - num_stddevs * stddev
color_params = model_colors(model_to_plot)
plt.fill_between(pred.time.data, upper.data, lower.data, alpha=.2,
label='_nolegend_', color=color_params['color'])
plt.plot(pred.time, mean, **color_params, label=model_to_plot)
return None
def plot_observed_data(data_inf, predictions, location_to_plot):
"""Helper function to plot the observed data at a location.
Args:
data_inf: an xr.DataArray representing the *true* new_infections with
dimensions of (location, time).
predictions: an xr.DataArray representing predicted new_infections with
dimensions of (location, time, sample, model). Where model is the
Estimator used to generate the predicted new_infections.
location_to_plot: The coordinate integer of the location that you want to
plot.
Returns:
None
"""
data_to_plot = data_inf.isel(location=location_to_plot)
observed_color_params = model_colors('observed')
max_observed_time = min(predictions.dropna('time').time)
plt.plot(
data_to_plot.coords['time'].sel(time=(data_inf.time < max_observed_time)),
data_to_plot.sel(time=(data_inf.time < max_observed_time)),
**observed_color_params,
label='observed')
return None
def plot_ground_truth_data(data_inf, predictions, location_to_plot):
"""Helper function to plot the ground truth data at a location.
Args:
data_inf: an xr.DataArray representing the *true* new_infections with
dimensions of (location, time).
predictions: an xr.DataArray representing predicted new_infections with
dimensions of (location, time, sample, model). Where model is the
Estimator used to generate the predicted new_infections.
location_to_plot: The coordinate integer of the location that you want to
plot.
Returns:
None
"""
data_to_plot = data_inf.isel(location=location_to_plot)
ground_truth_color_params = model_colors('ground_truth')
plt.plot(
predictions.time,
data_to_plot.sel(time=predictions.time),
**ground_truth_color_params,
label='ground truth')
return None
def plot_one_model_predictions(data_inf,
predictions,
model_to_plot,
location_to_plot,
plot_pred_function=plot_rollout_samples,
plot_ground_truth=False):
"""Plot the data and predicted mean for a single model and location.
Args:
data_inf: an xr.DataArray representing the *true* new_infections with
dimensions of (location, time).
predictions: an xr.DataArray representing predicted new_infections with
dimensions of (location, time, sample, model). Where model is the
Estimator used to generate the predicted new_infections.
model_to_plot: The coordinate name of the model that you want to plot.
location_to_plot: The coordinate integer of the location that you want to
plot.
plot_pred_function: a function to plot the predicted mean values and some
errors.
plot_ground_truth: a boolean indicating whether to plot the ground truth
new_infection values.
Returns:
None
"""
plt.figure(figsize=(12, 8))
plot_pred_function(predictions, model_to_plot, location_to_plot)
plot_observed_data(data_inf, predictions, location_to_plot)
if plot_ground_truth:
plot_ground_truth_data(data_inf, predictions, location_to_plot)
plt.xlabel('Time')
plt.ylabel('New infections')
plt.legend()
plt.show()
return None
def plot_many_model_predictions(data_inf,
predictions,
model_type_to_plot,
location_to_plot,
plot_pred_function=plot_rollout_samples,
plot_ground_truth=False):
"""Plot the data and predicted means for a type of model in one location.
Args:
data_inf: an xr.DataArray representing the *true* new_infections with
dimensions of (location, time).
predictions: an xr.DataArray representing predicted new_infections with
dimensions of (location, time, sample, model). Where model is the
Estimator used to generate the predicted new_infections.
model_type_to_plot: The name of the model_type that you want to plot. Must
be in plot_constants.model_type.keys().
location_to_plot: The coordinate integer of the location that you want to
plot.
plot_pred_function: a function to plot the predicted mean values and some
errors.
plot_ground_truth: a boolean indicating whether to plot the ground truth
new_infection values.
Returns:
None
"""
plt.figure(figsize=(16, 8))
for model_name in model_types[model_type_to_plot]:
plot_pred_function(predictions, model_name, location_to_plot)
plot_observed_data(data_inf, predictions, location_to_plot)
if plot_ground_truth:
plot_ground_truth_data(data_inf, predictions, location_to_plot)
plt.xlabel('Time')
plt.ylabel('New Infections')
plt.legend()
plt.show()
def plot_violin(ax, error_array, models_to_plot):
"""Make a violin plot of one error metric of multiple models on one dataset.
Args:
ax: A pyplot.axis object to draw on.
error_array: An xr.DataArray representing the calculated errors of a given
metric with dimensions of (location, time, model, value_type).
models_to_plot: A list of strings representing the names of the models to
plot. Must be elements of error_array.model.values().
Returns:
None
"""
e = []
for model in models_to_plot:
mean_error = error_array.sel(model=model).mean('sample')
mean_diff = mean_error.sel(value_type='difference')
e.append(mean_diff)
ax.violinplot(e, showextrema=True, showmeans=False)
ax.set_ylabel('error, in raw counts', labelpad=None)
ax.axhline(0, c='k')
ax.set_xticks(np.arange(1, len(models_to_plot) + 1))
ax.set_xticklabels(models_to_plot, rotation=15)
ax.set_xlim(0.25, len(models_to_plot) + 0.75)
ax.set_xlabel('Model')
def plot_scatter(ax, error_array, models_to_plot):
"""Make a scatter plot of real/pred metric of multiple models on one dataset.
Args:
ax: A pyplot.axis object to draw on.
error_array: An xr.DataArray representing the calculated errors of a given
      metric, with dimensions of (location, time, sample, model, value_type).
models_to_plot: A list of strings representing the names of the models to
plot. Must be elements of error_array.model.values().
Returns:
None
"""
for model in models_to_plot:
mean_error = error_array.sel(model=model).mean('sample')
ax.scatter(mean_error.sel(value_type='ground_truth'),
mean_error.sel(value_type='predicted'),
label=model, s=2, alpha=0.75, c=model_colors(model)['color'])
ax.set_aspect('equal')
ax.set_ylim(ax.get_xlim())
ax.set_ylabel('Predicted Value', labelpad=None)
ax.set_xlabel('True Value', labelpad=None)
ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--')
ax.legend()
def plot_violin_scatter(error_array, models_to_plot, metrics_to_plot):
"""Make a violin and scatter plot of metrics of multiple models on a dataset.
Args:
    error_array: An xr.DataArray representing the calculated errors with
      dimensions of (location, time, sample, model, metric, value_type).
models_to_plot: A list of strings representing the names of the models to
plot. Must be elements of error_array.model.values().
metrics_to_plot: A list of strings representing the names of the error
metrics to plot. Must be elements of error_array.metric.values().
Returns:
None
"""
fig, ax = plt.subplots(2, len(metrics_to_plot),
figsize=(5*len(metrics_to_plot), 15))
for i, metric in enumerate(metrics_to_plot):
plot_violin(ax[0][i], error_array.sel(metric=metric),
models_to_plot)
ax[0][i].set_title(metric)
plot_scatter(ax[1][i], error_array.sel(metric=metric), models_to_plot)
ax[1][i].set_title(metric)
plt.show()
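# Hedged usage sketch (added for illustration; not part of the original module).
# It builds a tiny synthetic error array with the dimensions plot_violin expects
# and draws it on a single axis.  The model names, sizes, and the omission of the
# 'time' dimension are illustrative assumptions, not taken from the real pipeline.
if __name__ == '__main__':
  import numpy as np
  import xarray as xr
  from matplotlib import pyplot as plt

  rng = np.random.RandomState(0)
  demo_models = ['model_a', 'model_b']
  demo_errors = xr.DataArray(
      rng.normal(size=(30, len(demo_models), 5, 2)),
      dims=('location', 'model', 'sample', 'value_type'),
      coords={'model': demo_models,
              'value_type': ['difference', 'ground_truth']})
  fig, ax = plt.subplots(figsize=(6, 4))
  plot_violin(ax, demo_errors, demo_models)
  plt.show()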
| gpl-3.0 |
yunfeilu/scikit-learn | sklearn/linear_model/tests/test_sag.py | 93 | 25649 | # Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.preprocessing import LabelEncoder
from sklearn.datasets import make_blobs
from sklearn.base import clone
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
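# Note added for explanation (not part of the original test module): `sag` below is
# a plain-Python reference implementation of stochastic average gradient descent.
# It stores one (regularized) gradient per sample; each step draws a random sample,
# replaces that sample's stored gradient, and moves the weights against the running
# average of all stored gradients:
#     w <- w - step_size * sum_gradient / n_seen
# The compiled solver used by LogisticRegression/Ridge is expected to match it.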
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
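# Note added for explanation (not part of the original test module): `sag_sparse`
# is the same reference algorithm written the way the sparse solver applies it:
# the L2 shrinkage is folded into a running scale `wscale` (weights are stored as
# wscale * weights), and the cumulative sums in `c_sum` together with the
# `last_updated` bookkeeping mirror the just-in-time per-feature updates of the
# real sparse implementation, with an occasional rescale when `wscale` underflows.
# Numerically it should agree with the dense `sag` above.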
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
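# Note added for explanation (not part of the original test module): these step
# sizes are 1 / L, where L bounds the Lipschitz constant of a single sample's
# gradient.  For squared loss L_i = ||x_i||^2 + alpha (plus 1 when an intercept
# column is implicitly appended); for log loss the sigmoid's derivative is at most
# 1/4, so L_i = ||x_i||^2 / 4 + alpha, giving
# 4 / (max_i ||x_i||^2 + fit_intercept + 4 * alpha).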
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = get_max_squared_sum(X)
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
def test_get_max_squared_sum():
n_samples = 100
n_features = 10
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_features).astype(np.float64)
mask = rng.randn(n_samples, n_features)
X[mask > 0] = 0.
X_csr = sp.csr_matrix(X)
X[0, 3] = 0.
X_csr[0, 3] = 0.
sum_X = get_max_squared_sum(X)
sum_X_csr = get_max_squared_sum(X_csr)
assert_almost_equal(sum_X, sum_X_csr)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
    score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
| bsd-3-clause |
smarisa/neronet | doc/material/theano/t01__classifying_gaussian_clusters.py | 2 | 15151 | # Toy example
#
# Source: http://nbviewer.ipython.org/github/craffel/theano-tutorial/blob/master/Theano%20Tutorial.ipynb
#
# We'll train our neural network to classify two Gaussian-distributed clusters
# in 2d space.
#
# Defining a multilayer perceptron is out of the scope of this tutorial;
# please see here for background information: http://en.wikipedia.org/wiki/Multilayer_perceptron
# We will be using the convention that datapoints are column vectors.
#
### Imports
import numpy as np
import matplotlib.pyplot as plt
import theano
# By convention, the tensor submodule is loaded as T
import theano.tensor as T
### Layer class
# We'll be defining our multilayer perceptron as a series of "layers", each
# applied successively to the input to produce the network output. Each layer
# is defined as a class, which stores a weight matrix and a bias vector and
# includes a function for computing the layer's output.
# Note that if we weren't using Theano, we might expect the output method to
# take in a vector and return the layer's activation in response to this
# input. However, with Theano, the output function is instead meant to be used
# to create (using theano.function) a function which can take in a vector and
# return the layer's activation. So, if you were to pass, say, a np.ndarray to
# the Layer class's output function, you'd get an error. Instead, we'll
# construct a function for actually computing the Layer's activation outside
# of the class itself.
class Layer(object):
def __init__(self, W_init, b_init, activation):
'''
A layer of a neural network, computes s(Wx + b) where s is a nonlinearity and x is the input vector.
:parameters:
- W_init : np.ndarray, shape=(n_output, n_input)
Values to initialize the weight matrix to.
- b_init : np.ndarray, shape=(n_output,)
Values to initialize the bias vector
- activation : theano.tensor.elemwise.Elemwise
Activation function for layer output
'''
# Retrieve the input and output dimensionality based on W's initialization
n_output, n_input = W_init.shape
# Make sure b is n_output in size
assert b_init.shape == (n_output,)
# All parameters should be shared variables.
# They're used in this class to compute the layer output,
# but are updated elsewhere when optimizing the network parameters.
# Note that we are explicitly requiring that W_init has the theano.config.floatX dtype
self.W = theano.shared(value=W_init.astype(theano.config.floatX),
                               # The name parameter is solely for printing purposes
name='W',
# Setting borrow=True allows Theano to use user memory for this object.
# It can make code slightly faster by avoiding a deep copy on construction.
# For more details, see
# http://deeplearning.net/software/theano/tutorial/aliasing.html
borrow=True)
# We can force our bias vector b to be a column vector using numpy's reshape method.
# When b is a column vector, we can pass a matrix-shaped input to the layer
# and get a matrix-shaped output, thanks to broadcasting (described below)
self.b = theano.shared(value=b_init.reshape(n_output, 1).astype(theano.config.floatX),
name='b',
borrow=True,
# Theano allows for broadcasting, similar to numpy.
# However, you need to explicitly denote which axes can be broadcasted.
# By setting broadcastable=(False, True), we are denoting that b
# can be broadcast (copied) along its second dimension in order to be
# added to another variable. For more information, see
# http://deeplearning.net/software/theano/library/tensor/basic.html
broadcastable=(False, True))
self.activation = activation
# We'll compute the gradient of the cost of the network with respect to the parameters in this list.
self.params = [self.W, self.b]
def output(self, x):
'''
Compute this layer's output given an input
:parameters:
- x : theano.tensor.var.TensorVariable
Theano symbolic variable for layer input
:returns:
- output : theano.tensor.var.TensorVariable
Mixed, biased, and activated x
'''
# Compute linear mix
lin_output = T.dot(self.W, x) + self.b
# Output is just linear mix if no activation function
# Otherwise, apply the activation function
return (lin_output if self.activation is None else self.activation(lin_output))
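# Quick illustration (added; not part of the original tutorial): as described above,
# Layer.output only builds a symbolic expression, so to evaluate a layer we compile
# that expression with theano.function.  The sizes below are arbitrary assumptions.
example_layer = Layer(W_init=np.random.randn(3, 2),
                      b_init=np.zeros(3),
                      activation=T.nnet.sigmoid)
example_input = T.matrix('example_input')
# Compile a callable mapping a (2, n_points) array to the layer's (3, n_points) activations.
example_layer_output = theano.function([example_input],
                                        example_layer.output(example_input))
print(example_layer_output(np.random.randn(2, 5).astype(theano.config.floatX)).shape)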
### MLP class
# Most of the functionality of our MLP is contained in the Layer class; the
# MLP class is essentially just a container for a list of Layers and their
# parameters. The output function simply recursively computes the output for
# each layer. Finally, the squared_error returns the squared Euclidean
# distance between the output of the network given an input and the desired
# (ground truth) output. This function is meant to be used as a cost in the
# setting of minimizing cost over some training data. As above, the output and
# squared error functions are not to be used for actually computing values;
# instead, they're to be used to create functions which are used to compute
# values.
class MLP(object):
def __init__(self, W_init, b_init, activations):
'''
Multi-layer perceptron class, computes the composition of a sequence of Layers
:parameters:
- W_init : list of np.ndarray, len=N
Values to initialize the weight matrix in each layer to.
The layer sizes will be inferred from the shape of each matrix in W_init
- b_init : list of np.ndarray, len=N
Values to initialize the bias vector in each layer to
- activations : list of theano.tensor.elemwise.Elemwise, len=N
Activation function for layer output for each layer
'''
# Make sure the input lists are all of the same length
assert len(W_init) == len(b_init) == len(activations)
# Initialize lists of layers
self.layers = []
# Construct the layers
for W, b, activation in zip(W_init, b_init, activations):
self.layers.append(Layer(W, b, activation))
# Combine parameters from all layers
self.params = []
for layer in self.layers:
self.params += layer.params
def output(self, x):
'''
Compute the MLP's output given an input
:parameters:
- x : theano.tensor.var.TensorVariable
Theano symbolic variable for network input
:returns:
- output : theano.tensor.var.TensorVariable
x passed through the MLP
'''
# Recursively compute output
for layer in self.layers:
x = layer.output(x)
return x
def squared_error(self, x, y):
'''
Compute the squared euclidean error of the network output against the "true" output y
:parameters:
- x : theano.tensor.var.TensorVariable
Theano symbolic variable for network input
- y : theano.tensor.var.TensorVariable
Theano symbolic variable for desired network output
:returns:
- error : theano.tensor.var.TensorVariable
The squared Euclidian distance between the network output and y
'''
return T.sum((self.output(x) - y)**2)
### Gradient descent
# To train the network, we will minimize the cost (squared Euclidean distance
# of network output vs. ground-truth) over a training set using gradient
# descent. When doing gradient descent on neural nets, it's very common to use
# momentum, which is simply a leaky integrator on the parameter update. That
# is, when updating parameters, a linear mix of the current gradient update
# and the previous gradient update is computed. This tends to make the network
# converge more quickly on a good solution and can help avoid local minima in
# the cost function. With traditional gradient descent, we are guaranteed to
# decrease the cost at each iteration. When we use momentum, we lose this
# guarantee, but this is generally seen as a small price to pay for the
# improvement momentum usually gives.
# In Theano, we store the previous parameter update as a shared variable so
# that its value is preserved across iterations. Then, during the gradient
# update, we not only update the parameters, but we also update the previous
# parameter update shared variable.
def gradient_updates_momentum(cost, params, learning_rate, momentum):
'''
Compute updates for gradient descent with momentum
:parameters:
- cost : theano.tensor.var.TensorVariable
Theano cost function to minimize
- params : list of theano.tensor.var.TensorVariable
Parameters to compute gradient against
- learning_rate : float
Gradient descent learning rate
- momentum : float
Momentum parameter, should be at least 0 (standard gradient descent) and less than 1
:returns:
updates : list
List of updates, one for each parameter
'''
# Make sure momentum is a sane value
assert momentum < 1 and momentum >= 0
# List of update steps for each parameter
updates = []
# Just gradient descent on cost
for param in params:
# For each parameter, we'll create a param_update shared variable.
# This variable will keep track of the parameter's update step across iterations.
# We initialize it to 0
param_update = theano.shared(param.get_value()*0., broadcastable=param.broadcastable)
# Each parameter is updated by taking a step in the direction of the gradient.
# However, we also "mix in" the previous step according to the given momentum value.
# Note that when updating param_update, we are using its old value and also the new gradient step.
updates.append((param, param - learning_rate*param_update))
# Note that we don't need to derive backpropagation to compute updates - just use T.grad!
updates.append((param_update, momentum*param_update + (1. - momentum)*T.grad(cost, param)))
return updates
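# Added note (not in the original tutorial): Theano applies all updates of a single
# function call simultaneously, so the two updates above implement
#     param    <- param - learning_rate * velocity_old
#     velocity <- momentum * velocity_old + (1 - momentum) * grad(cost, param)
# i.e. the parameter step uses the velocity from the previous iteration, and T.grad
# supplies the gradient without any hand-written backpropagation.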
### Toy example
# We'll train our neural network to classify two Gaussian-distributed clusters
# in 2d space.
## Generation of training data
# Two randomly-generated Gaussian-distributed clouds of points in 2d space
np.random.seed(0)
# Number of points
N = 1000
# Labels for each cluster
y = np.random.random_integers(0, 1, N)
# Mean of each cluster
means = np.array([[-1, 1], [-1, 1]])
# Covariance (in X and Y direction) of each cluster
covariances = np.random.random_sample((2, 2)) + 1
# Dimensions of each point
X = np.vstack([np.random.randn(N)*covariances[0, y] + means[0, y],
np.random.randn(N)*covariances[1, y] + means[1, y]]).astype(theano.config.floatX)
# Convert to targets, as floatX
y = y.astype(theano.config.floatX)
# Plot the data
plt.figure(figsize=(8, 8))
plt.scatter(X[0, :], X[1, :], c=y, lw=.3, s=3, cmap=plt.cm.cool)
plt.axis([-6, 6, -6, 6])
plt.savefig('data.png', transparent=False)
# First, set the size of each layer (and the number of layers)
# Input layer size is training data dimensionality (2)
# Output size is just 1-d: class label - 0 or 1
# Finally, let the hidden layers be twice the size of the input.
# If we wanted more layers, we could just add another layer size to this list.
layer_sizes = [X.shape[0], X.shape[0]*2, 1]
# Set initial parameter values
W_init = []
b_init = []
activations = []
for n_input, n_output in zip(layer_sizes[:-1], layer_sizes[1:]):
# Getting the correct initialization matters a lot for non-toy problems.
# However, here we can just use the following initialization with success:
# Normally distribute initial weights
W_init.append(np.random.randn(n_output, n_input))
# Set initial biases to 1
b_init.append(np.ones(n_output))
# We'll use sigmoid activation for all layers
# Note that this doesn't make a ton of sense when using squared distance
# because the sigmoid function is bounded on [0, 1].
activations.append(T.nnet.sigmoid)
# Create an instance of the MLP class
mlp = MLP(W_init, b_init, activations)
# Create Theano variables for the MLP input
mlp_input = T.matrix('mlp_input')
# ... and the desired output
mlp_target = T.vector('mlp_target')
# Learning rate and momentum hyperparameter values
# Again, for non-toy problems these values can make a big difference
# as to whether the network (quickly) converges on a good local minimum.
learning_rate = 0.01
momentum = 0.9
# Create a function for computing the cost of the network given an input
cost = mlp.squared_error(mlp_input, mlp_target)
# Create a theano function for training the network
train = theano.function([mlp_input, mlp_target], cost,
updates=gradient_updates_momentum(cost, mlp.params, learning_rate, momentum))
# Create a theano function for computing the MLP's output given some input
mlp_output = theano.function([mlp_input], mlp.output(mlp_input))
# Keep track of the number of training iterations performed
iteration = 0
# We'll only train the network with 20 iterations.
# A more common technique is to use a hold-out validation set.
# When the validation error starts to increase, the network is overfitting,
# so we stop training the net. This is called "early stopping", which we won't do here.
max_iteration = 20
while iteration < max_iteration:
# Train the network using the entire training set.
# With large datasets, it's much more common to use stochastic or mini-batch gradient descent
# where only a subset (or a single point) of the training set is used at each iteration.
# This can also help the network to avoid local minima.
current_cost = train(X, y)
# Get the current network output for all points in the training set
current_output = mlp_output(X)
# We can compute the accuracy by thresholding the output
# and computing the proportion of points whose class match the ground truth class.
accuracy = np.mean((current_output > .5) == y)
iteration += 1
# Plot network output after this iteration
plt.figure(figsize=(8, 8))
plt.scatter(X[0, :], X[1, :], c=current_output,
lw=.3, s=3, cmap=plt.cm.cool, vmin=0, vmax=1)
plt.axis([-6, 6, -6, 6])
plt.title('Cost: {:.3f}, Accuracy: {:.3f}'.format(float(current_cost), accuracy))
plt.savefig('iter%02d.png' % (iteration), transparent=False)
| mit |
sunset1995/HMM | examples/mnist/data_proc.py | 1 | 108931 | import os
import struct
import numpy as np
from array import array
def read_mnist():
def mnist_file_name(filename):
return os.path.join(os.path.dirname(__file__), 'datas', filename)
# Read train set labels into numpy (shapes: 60000 x 1)
with open(mnist_file_name('train-labels-idx1-ubyte'), 'rb') as f:
magic, n = struct.unpack('>II', f.read(8))
train_labels = np.array(array('B', f.read()), dtype=np.uint8)
# Read train set images into numpy (shapes: 60000 x 784)
with open(mnist_file_name('train-images-idx3-ubyte'), 'rb') as f:
magic, n, row, col = struct.unpack(">IIII", f.read(16))
sz = row * col
train_images = np.array(array('B', f.read()), dtype=np.uint8).reshape((n, sz))
# Read test set labels into numpy (shapes: 10000 x 1)
with open(mnist_file_name('t10k-labels-idx1-ubyte'), 'rb') as f:
magic, n = struct.unpack('>II', f.read(8))
test_labels = np.array(array('B', f.read()), dtype=np.uint8)
# Read test set images into numpy (shapes: 10000 x 784)
with open(mnist_file_name('t10k-images-idx3-ubyte'), 'rb') as f:
magic, n, row, col = struct.unpack(">IIII", f.read(16))
sz = row * col
test_images = np.array(array('B', f.read()), dtype=np.uint8).reshape((n, sz))
return train_images, train_labels, test_images, test_labels
def show_one_example(one_x, one_y):
from matplotlib import pyplot as plt
print(one_y)
img = one_x.reshape((28, 28))
plt.imshow(img, cmap='gray', interpolation='nearest', vmin=0, vmax=255)
plt.show()
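# Added note (not part of the original file): encode_col appears to quantize one
# image column into a 6-bit symbol -- each chunk of 5 pixels contributes one bit,
# set when that chunk's total intensity exceeds 200 -- so a column becomes an
# integer in [0, 63], suitable as a discrete observation symbol.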
def encode_col(col):
ret = 0
val = 1
for i in range(6):
if col[i*5:(i+1)*5].sum() > 200:
ret += val
val *= 2
return ret
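# Hedged usage sketch (added for illustration): load the MNIST arrays and show one
# digit.  It assumes the four idx files referenced above exist under datas/ next to
# this script.
if __name__ == '__main__':
    train_images, train_labels, test_images, test_labels = read_mnist()
    print(train_images.shape, train_labels.shape)  # expected: (60000, 784) (60000,)
    print(test_images.shape, test_labels.shape)    # expected: (10000, 784) (10000,)
    show_one_example(train_images[0], train_labels[0])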
# K-means encoder
centers = np.array((
(0.0, 0.17661269596504756, 2.8453353893600615, 5.8364430737599591, 7.7636597275764583, 7.589719866358263, 6.3717296324852226, 6.3118478540221021, 8.2631714212284759, 16.820920071960934, 41.506707787201236, 92.530095091236191, 162.05489591364687, 209.63459264970444, 220.37270624518118, 222.76951940375224, 231.51611410948342, 207.45931637111283, 130.76345412490363, 55.957080442045743, 17.256283731688512, 6.7787201233616035, 4.9342071446928806, 5.3098432279619638, 4.5048059624775121, 1.9221279876638395, 0.4650732459521974, 0.019018247237214084, 0.0, 0.22575173477255203, 1.5414546389103059, 3.4720123361603701, 5.3729118478540219, 6.1389874068362893, 6.5939861218195839, 8.9505525571832436, 15.618093035209458, 37.202724235415062, 87.53173991261886, 160.55929067077872, 221.08712413261372, 233.69617065021845, 221.4053456694937, 216.82456951940375, 213.32706245181188, 163.82395271138526, 80.948188126445643, 28.118529940889232, 7.9904908763813927, 3.8407093292212799, 5.1986635826265744, 6.8475456180930356, 6.031508609611925, 2.8418915445900796, 0.52284759701876127, 0.010794140323824209),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.1999270671893518, 4.9861427659768438, 55.648099188622481, 139.80754854590208, 185.17166560306319, 155.43486188348984, 95.529765703345788, 50.592123256449995, 32.812745008660769, 30.60570699243322, 27.220439420184157, 18.126447260461301, 8.317075394293008, 3.7126447260461299, 1.8072750478621571, 2.0914395113501687, 12.836083508068192, 58.1380253441517, 131.62868082778741, 181.47734524569242, 173.61436776369769, 104.23812562676635, 11.433494393290182, 0.0, 0.0, 0.0, 0.0, 0.0, 0.21952776005105296, 7.5846476433585561, 73.398851308232295, 167.96180144042302, 197.97638800255265, 146.3374054152612, 78.155802716747203, 40.064910201476891, 30.747834807183882, 32.783936548454733, 30.182970188713647, 20.302579998176679, 11.044580180508706, 9.6407147415443522, 17.681921779560579, 46.566505606709818, 102.90427568602425, 175.35244780745737, 230.82277327012488, 236.99425654116146, 192.83854499042758, 106.47944206399855, 11.144315799070107),
(0.0, 0.14025623735670936, 0.8540495991608601, 7.399265752603581, 68.520791189031243, 177.52693489173598, 231.449314452686, 189.336854723908, 102.30141604855024, 38.060912564621262, 13.351015209410354, 5.9291226492844835, 3.6039559451562151, 3.6313778377163408, 15.491046677155914, 58.643515396718364, 126.88334457181389, 191.05761594365774, 227.78841687270548, 225.04225668689594, 165.39049973776878, 70.478909118153894, 12.697310256986588, 4.3616543043380531, 2.9122649284483404, 0.37791263954446691, 0.0, 0.0, 0.0, 0.35918183861541919, 2.6650932793886266, 14.7505806548288, 85.821458005544315, 190.7649659099423, 230.75335281336629, 173.18341200269722, 78.963287630179067, 24.234509627631677, 9.4965160710271963, 6.5300816662920509, 5.6786543792612569, 8.3893758897130439, 28.349291975724881, 87.594815314302835, 162.7552258934592, 212.20753727429386, 223.65213156514574, 191.95242376564022, 113.53667490821907, 36.210384356035064, 6.4490896830748481, 4.3128792987188129, 3.0469768487300515, 0.41642316625458903, 0.0, 0.0),
(0.0, 0.048484848484848485, 0.083849483849483847, 0.63469863469863474, 2.6394938394938396, 8.9858141858141867, 34.914352314352314, 97.149117549117548, 167.80000000000001, 207.70089910089911, 216.00213120213121, 210.16556776556777, 207.23742923742924, 209.58374958374958, 214.20326340326341, 218.29863469863469, 225.20845820845821, 232.82584082584083, 229.02331002331002, 191.38215118215118, 122.24602064602064, 57.87246087246087, 25.166033966033964, 15.753579753579753, 10.408658008658008, 5.2269730269730266, 1.9939393939393939, 0.17442557442557444, 0.0, 0.094105894105894108, 0.27665667665667665, 0.98208458208458205, 3.5379953379953379, 10.373293373293373, 34.575491175491173, 97.909890109890114, 170.78061938061938, 215.29796869796868, 229.27392607392608, 229.66053946053947, 228.57009657009658, 228.75244755244756, 227.18101898101898, 224.20865800865801, 222.71548451548452, 216.18601398601399, 189.98001998001999, 135.47459207459207, 72.726473526473526, 25.485647685647685, 8.2147852147852145, 6.1315351315351316, 4.3386613386613391, 1.7817515817515817, 0.48171828171828174, 0.026107226107226107),
(0.0, 0.015710215710215709, 0.13447293447293449, 0.65966625966625969, 1.9763125763125764, 3.3176231176231177, 5.1873015873015875, 16.053561253561252, 68.77159137159137, 166.70801790801792, 224.99845339845339, 192.94220594220593, 121.21375661375662, 67.725844525844522, 60.834920634920636, 122.02433862433863, 197.591045991046, 198.60919820919821, 125.32511192511193, 60.716320716320716, 23.332030932030932, 9.8589336589336583, 11.431094831094832, 17.164102564102564, 14.073504273504273, 4.596011396011396, 0.7440781440781441, 0.15474155474155474, 0.0, 0.0, 0.041351241351241352, 0.48188848188848188, 1.7159951159951159, 3.6499796499796502, 8.364672364672364, 33.949857549857548, 113.60724460724461, 201.19568579568579, 212.63435083435084, 131.89450549450549, 51.91729751729752, 24.402767602767604, 41.518599918599918, 118.79137159137159, 201.88302808302808, 193.65128205128204, 104.81912901912902, 37.882702482702484, 13.268783068783069, 12.525844525844526, 20.127798127798126, 27.257305657305658, 21.990150590150591, 9.990964590964591, 2.4364672364672364, 0.18453398453398454),
(0.0, 0.42176870748299322, 19.582243153671726, 37.178963893249609, 36.022501308215595, 21.087650444793301, 11.077097505668934, 7.0641897784754928, 5.5648874934589223, 5.9674690388976099, 5.9419152276295133, 16.870225013082155, 74.186900401186122, 168.729809872667, 205.76338740624456, 143.85382871097156, 58.155590441304724, 20.831414617128903, 31.447409733124019, 118.36255014826443, 215.48360369788941, 193.3417931275074, 69.134833420547707, 8.6831501831501825, 1.4591836734693877, 0.38069073783359497, 0.11067503924646782, 0.0, 0.0, 0.25562532705389851, 12.697366125937554, 23.444967730682016, 19.992499563928135, 10.873277516134658, 7.797575440432583, 5.9114774114774118, 5.2514390371533226, 6.5761381475667191, 13.578405721262865, 42.584946799232512, 109.2245770102913, 189.86377114948544, 210.644252572824, 133.77080062794349, 44.733036804465378, 19.939124367695797, 52.688470259898828, 145.39979068550497, 219.46267224838653, 173.19361590790163, 55.613901971044825, 6.8790336647479506, 1.5249433106575965, 0.4502878074306646, 0.045351473922902494, 0.0),
(0.0, 0.005975705329153605, 0.45743534482758619, 8.2430936520376168, 88.463655956112859, 205.53556034482759, 215.7419670846395, 141.63920454545453, 70.023608934169275, 36.38313087774295, 19.056083463949843, 11.632200235109718, 8.7802213949843253, 8.1690830721003138, 9.7748824451410652, 11.06725117554859, 12.404682601880877, 11.910266457680251, 10.529192789968652, 9.0775372257053295, 6.9641947492163006, 4.7853644200626961, 4.0417319749216301, 5.0518221003134798, 3.486971003134796, 0.022825235109717869, 0.0, 0.0, 0.0, 0.014988244514106583, 0.87940830721003138, 9.3297413793103452, 70.830867946708466, 153.20973746081503, 141.32631269592477, 64.448716692789972, 15.11985697492163, 5.2517143416927903, 4.5758718652037613, 5.5011755485893419, 5.7313381661442007, 5.7104721786833856, 5.6305348746081503, 4.6918103448275863, 4.630289968652038, 3.9900568181818183, 3.7020474137931036, 4.4025764106583072, 4.9553291536050157, 5.0124412225705326, 5.1370983542319753, 6.2614615987460818, 4.0145963949843262, 0.039625783699059558, 0.0, 0.0),
(0.0, 0.0080570019183337904, 0.14206631953959989, 0.85031515483694164, 4.2023020005480953, 10.576377089613592, 12.903096738832557, 9.3292408879144979, 10.081556590846807, 37.608440668676351, 112.73088517402027, 186.86078377637708, 192.01622362291039, 128.34294327212936, 54.905563167991232, 16.119539599890381, 7.8354069608111807, 6.4488901068785971, 4.961578514661551, 3.7745683748972323, 4.3797204713620168, 4.8779939709509454, 5.0702658262537685, 6.9121403124143601, 7.4563442038914767, 4.5356536037270487, 2.760646752534941, 0.34886270211016718, 0.0, 0.01008495478213209, 0.11531926555220609, 0.77029323102219782, 3.9704576596327761, 10.128089887640449, 12.409975335708413, 13.579336804604001, 34.679802685667305, 97.337352699369688, 179.06500411071525, 220.64149081940258, 188.35385036996436, 102.56349684845163, 35.006467525349407, 7.0698821594957524, 4.3149355987941904, 4.6937791175664563, 3.8560153466703206, 3.6194025760482322, 5.2527815839956151, 7.0534392984379286, 9.4845163058372162, 14.550232940531652, 16.625376815565907, 12.506056453822966, 6.8065771444231293, 0.76146889558783226),
(0.0, 0.0, 0.014578833693304536, 0.80453563714902809, 17.057415406767458, 45.781677465802737, 74.572714182865369, 107.11213102951764, 140.61015118790496, 168.08117350611951, 187.28797696184304, 200.91666666666666, 208.50935925125989, 211.26493880489562, 211.67746580273578, 211.34125269978401, 211.33549316054714, 210.86573074154069, 209.92854571634268, 207.62760979121671, 202.74460043196544, 195.06065514758819, 183.91504679625629, 157.19186465082794, 100.13156947444205, 51.091432685385172, 17.413246940244779, 1.5046796256299495, 0.0, 0.0, 0.0, 0.15586753059755221, 3.2553995680345573, 11.5093592512599, 19.755219582433405, 28.219402447804175, 37.274478041756659, 45.357631389488844, 51.737401007919367, 55.616270698344131, 55.9244060475162, 54.961843052555793, 53.570554355651545, 51.121850251979843, 48.460043196544277, 46.590892728581714, 48.301115910727141, 53.235781137509001, 60.737041036717059, 70.121670266378686, 76.852591792656582, 71.356011519078478, 46.548596112311017, 23.609251259899207, 8.2661987041036724, 0.58063354931605471),
(0.0, 0.0, 0.0, 0.0, 1.4526170163707632, 118.36438398278379, 208.37299208362154, 167.21366536007994, 69.747598186150185, 22.503497040965335, 20.391053723772192, 57.879102298055493, 137.36230881561755, 198.37276150949197, 179.16609023134271, 99.619629544231799, 36.587502882176622, 15.047267696564445, 8.3108139266774259, 5.2369533471677814, 4.9510414264852818, 19.230497271539466, 120.55145645991853, 226.12550918453616, 179.69249096917991, 11.450772423334103, 0.26777342248866343, 0.0, 0.0, 0.0, 0.0, 0.0, 1.7961724694489278, 129.18445930366613, 215.04849742525556, 160.00038429021598, 58.054184920451924, 18.184382445622933, 28.102759203750672, 77.190223656905701, 158.0397356083314, 209.23334101913764, 184.69256782722312, 102.06817308431327, 36.48320651756206, 14.805856582891399, 7.9802474828990855, 5.2324187226193217, 6.1056798093920532, 29.180462685420029, 140.38067788794098, 233.30343555453078, 171.66651295058028, 10.008684958880947, 0.11659365152563216, 0.0),
(0.0, 0.00442976491159899, 0.0089372449970856818, 0.053934330678064892, 0.54983485525548859, 1.3254322906547504, 9.1050320575092289, 90.343306780648916, 203.73740042743344, 209.04418107635516, 104.5203419467651, 23.681678647755973, 5.6678453468039631, 4.0882067223625409, 3.6163979016903052, 3.3461433844958228, 4.7886924421993395, 5.2133281523217407, 5.9754420050514865, 5.0183796386244417, 4.325238002720031, 4.0668350495434238, 3.8084320963668157, 3.3525160287546143, 1.7759082960948125, 1.0892558772100251, 0.95395375947153682, 0.2317466485331261, 0.0, 0.0095201088012434425, 0.026267728774043132, 0.094307363512725853, 0.69384107246939963, 2.1560520691665048, 15.047678259180104, 105.65638235865553, 215.10472119681367, 197.53775014571596, 78.350340003885762, 13.679852341169614, 4.525043714785312, 3.9111326986594133, 3.1131144355935496, 2.5961530988925587, 3.6123955702350883, 4.1000971439673597, 4.9727608315523604, 4.7019234505537204, 5.1843015348746846, 6.2808626384301531, 7.1187099281134643, 8.4774043131921513, 9.64701768020206, 10.057314940742179, 7.0284048960559549, 0.98908101806877791),
(0.0, 0.0, 0.13431503792949576, 1.2355347315186673, 3.7266845158411424, 4.7648371262829095, 4.478432247508553, 7.5917745054291235, 24.860107095046853, 68.353488026178795, 133.47947344935298, 194.61892012494422, 227.67321136397442, 234.49635579354455, 232.72363528186821, 229.45671575189647, 223.82359065893203, 218.53168228469434, 219.53859883980365, 226.65112301056075, 222.70846348356389, 185.68332589617731, 105.71887550200803, 37.74550052060092, 8.1113342257920564, 2.3084188606276959, 0.74207942882641675, 0.14338836828796669, 0.0, 0.015469284545589766, 0.13126580395656701, 1.2074966532797857, 4.7981555853041797, 7.8157072735385986, 13.370667856611632, 34.41722445336903, 83.986910605384494, 156.45455897664732, 218.13810798750558, 240.27465417224454, 233.48691060538451, 217.69001933660567, 204.19113491001042, 194.30306410828499, 185.49226535772721, 180.18622638702959, 188.13223263424067, 209.93299122415587, 225.90584560464077, 201.50356983489513, 120.27130745203034, 43.08515543656106, 7.7284694332887103, 1.9145470771976796, 0.54596162427487727, 0.034508403986315633),
(0.0, 0.0, 0.0, 0.075429839156960624, 33.157307265668329, 130.80483915696061, 209.67186633388798, 200.36314475873544, 128.44737936772046, 59.752149195784803, 24.928521907931227, 11.24722684414864, 6.4862728785357735, 5.2423044925124795, 4.4034248474764279, 4.7314891846921796, 6.8827648363838048, 7.9261647254575704, 6.344356627842485, 5.9700499168053245, 26.151344980587908, 119.19301164725458, 216.07397393233501, 207.55255130338324, 69.210274542429289, 5.074112590127565, 0.036605657237936774, 0.0, 0.0, 0.0, 0.0, 0.11744315030504715, 40.814822518025515, 151.54561841375485, 221.5492928452579, 194.87312811980033, 109.30560177481975, 40.621602884082087, 15.826539101497504, 10.946547420965059, 11.331738768718802, 13.588394342762063, 13.341444814198558, 10.305671103716028, 8.9109816971713816, 8.3716028840820851, 6.8032445923460898, 8.179284525790349, 36.713325013865777, 135.96589018302828, 225.07244869661676, 203.90848585690514, 66.32536051026068, 4.3354132002218524, 0.00041597337770382697, 0.0),
(0.0, 0.025925107380680126, 0.84908089825019939, 2.7470236093361136, 5.7781760178343218, 8.0285964119958866, 10.153628842402636, 12.83780953021553, 14.284032709068022, 13.309861726354629, 11.295227205027434, 9.3071327676829796, 8.2988978466209922, 8.1268485331847131, 7.9340533684382475, 7.4694577635991504, 7.988325053570227, 7.9218979715381144, 7.957076554977947, 7.8414225177526449, 6.9082243511516399, 5.8310736146210687, 4.7688744967281327, 4.8047353198358778, 4.1223707348009491, 2.1995983434067781, 0.77595633666125363, 0.04639229741805917, 0.0, 0.016191180851166054, 0.42081695797980184, 1.2396486946160721, 2.2687447751011347, 3.1681384466075393, 5.0181802457984608, 6.9093293872334698, 8.2126377691723764, 8.7993638835772412, 8.7851713766827775, 8.062987056664328, 7.2600774486158226, 7.0007975477807989, 6.9336209630149224, 7.4071817736309562, 7.9326600620742012, 7.3346433616158508, 7.7650885470216879, 8.9708847014961233, 9.6618301319316995, 10.031431069771017, 9.3661705214809405, 9.0032670631984555, 6.8873055376721215, 3.7509536941836665, 1.2686390759976554, 0.045633185674888777),
(0.0, 0.0, 0.0, 0.0, 0.028405987108052005, 0.39484322080192286, 17.02884300229433, 105.71878072763029, 199.15175352343493, 199.73025237627007, 125.39178411449798, 52.012236425215775, 22.260133289631813, 21.977165956516988, 27.972468043264502, 30.116901562329289, 31.624822462580575, 37.663170545176442, 53.598820058997049, 88.726647000983277, 142.29738883426199, 195.6901562329291, 227.74915328307659, 238.89358680214139, 229.69638369933355, 181.45274773298371, 86.699442805637503, 7.8958811318693325, 0.0, 0.0, 0.0, 0.0, 0.0, 0.66677592046323608, 21.422265923740849, 122.30831421391893, 209.87872828580794, 196.24156014421501, 114.93510324483776, 48.354091554681524, 29.142466950726536, 34.090352889762919, 44.465858188572049, 56.01398448596089, 73.772205834152743, 107.78564405113077, 157.75101059761826, 209.03987763574784, 238.68108816781384, 241.81262973888343, 232.51993881787391, 209.78477002075823, 168.76575986015513, 111.28799300775702, 46.775811209439532, 4.1935977275210314),
(0.0, 0.0024136178861788619, 0.027862466124661246, 0.20481876693766937, 1.4504573170731707, 7.1664972899728996, 37.286331300813011, 118.66044207317073, 192.14748475609755, 217.93606029810297, 220.95350609756099, 231.296875, 240.38431571815718, 229.78374830623306, 181.80877371273712, 108.28362127371274, 45.939744241192415, 16.888804200542005, 7.9157350948509482, 5.177083333333333, 4.1646341463414638, 3.6890243902439024, 3.3224932249322494, 4.4071815718157179, 4.8443851626016263, 3.0766852981029809, 0.86001016260162599, 0.03201219512195122, 0.0, 0.0, 0.0300220189701897, 0.22599085365853658, 2.7773119918699187, 11.879446138211382, 43.880547086720867, 132.18978658536585, 210.01748814363143, 229.66624322493226, 223.74174288617886, 213.48441734417344, 194.96430386178861, 154.95329437669378, 96.156800474254737, 39.854251355013552, 9.0013973577235777, 3.0475101626016259, 3.1861026422764227, 3.5002117208672088, 3.7893377371273713, 4.0838414634146343, 4.2672764227642279, 6.3244410569105689, 7.3350694444444446, 5.1051405826558263, 1.768970189701897, 0.1027269647696477),
(0.0, 0.03801893663510561, 1.8312454479242535, 16.563292061179897, 112.61609613983977, 208.64428259286234, 219.78557902403497, 155.03721777130372, 73.942243262927889, 27.126147123088128, 11.53306627822287, 6.6627093954843408, 3.261252731245448, 2.0852148579752368, 2.2155863073561544, 3.3186453022578295, 8.4190094683175527, 25.678441369264384, 67.929424617625642, 140.14085943190094, 214.15520757465404, 226.2577567370721, 151.55782957028404, 44.805025491624178, 2.3887108521485798, 0.11835396941005098, 0.0, 0.0, 0.0, 0.061544064093226512, 4.0549162418062634, 23.994974508375819, 128.85265841223597, 215.7367079388201, 211.61689730517116, 133.48790968681718, 50.418353969410049, 13.901383831026948, 7.0651857246904592, 6.6146394756008737, 6.1711580480699197, 6.0537509104151495, 5.6278951201747995, 6.3449380917698468, 16.075819373634378, 46.490386016023308, 101.01391114348142, 167.98062636562273, 224.21653313911145, 214.33546977421705, 127.96547705753824, 34.947487254187912, 1.8325564457392571, 0.032920611798980336, 0.0, 0.0),
(0.0, 0.079899400270845425, 0.3497775198297543, 3.27031340684852, 40.43016057264461, 121.31804991294254, 192.70821242019733, 217.28027664925517, 213.17411491584446, 219.50488489069451, 229.04860708067324, 203.38252079705939, 141.80189591797253, 75.680112207390209, 36.048703811182044, 20.662362159024955, 14.248307216095956, 11.210147030373379, 10.297349584058813, 10.072160959566647, 9.0377248984329661, 7.3254497968659313, 5.7368446508028637, 5.7789224221319406, 3.9480557167730703, 0.50029019152640741, 0.032162894176823373, 0.0, 0.0, 0.061520603598374926, 0.29531824337396012, 2.3627394080092863, 43.510737086477072, 124.32636873669955, 195.60325981814665, 223.44496034049138, 216.83492938672856, 200.54609208744438, 164.8475527181273, 108.4257593344941, 53.025246662797443, 19.397368930160571, 8.6664248403946598, 6.0259721416134653, 4.5985200232153218, 3.7189011414200039, 3.7698781195589088, 4.0613271425807698, 3.6133681563165023, 3.0742406655059007, 3.4176823370090927, 5.0049332559489264, 4.0200715805765137, 0.83106016637647517, 0.10688721222673632, 0.0),
(0.0, 0.0, 0.0, 1.5541289115521479, 7.1369455959499613, 7.923328075131149, 4.393411350379691, 4.3147584284089655, 5.0878242048497748, 5.4228695109871969, 5.3581936241241426, 4.9331963755090058, 5.3138046149895448, 5.5593015150959317, 4.3279650757547969, 2.1891852232290252, 2.2089585091162554, 4.0359514288858724, 6.4370299717524491, 17.669503650170586, 67.089181554715879, 140.12469276202356, 172.07190285777173, 116.02912799442386, 36.28353938148868, 6.6176308742066841, 0.79287574745955469, 0.022744781540041822, 0.0, 0.0, 0.0, 1.8009464763931178, 8.7039509886642943, 11.257199457060054, 9.2755420228181524, 9.8722256869290881, 10.274991745845409, 10.062878315418761, 10.387211563153453, 11.866099269965883, 15.2899225943725, 17.004915807623171, 13.380351443559926, 6.9696613962361056, 3.5880259730731137, 4.5571737774679919, 9.8736930921897361, 34.458050552111231, 98.947393521405772, 180.33034960930334, 216.23588539564915, 156.08749403866614, 47.600975824498327, 4.9223375765802118, 0.3891925602553285, 0.0060530467001724203),
(0.0, 0.0, 0.01508485229415462, 0.14844529892409508, 1.4218952194328391, 4.0173032129256478, 10.283506488704846, 38.115872370318336, 107.21525492660923, 200.79273117166414, 235.69430990498023, 180.72344437460717, 92.075387288793578, 40.949125596184423, 19.861352460531666, 11.160054719562243, 6.4395681591304026, 5.0905830591193109, 6.5662365511886716, 6.4393463230672534, 4.9349280881428621, 3.8826117499168116, 3.3710208156172587, 2.6988205715975893, 1.9166266129330425, 1.4348356564498836, 1.0163419233186675, 0.42252375494509559, 0.0, 0.0, 0.0060635190594150925, 0.073168928162088212, 0.90690279883166336, 1.7642622102266425, 2.8444189743779349, 18.64206751210855, 79.492919732317816, 159.12178799866899, 169.21651199763375, 94.287462565164347, 21.430768661958812, 3.9660960550153437, 3.1540651458572113, 2.7493991939956373, 1.8262284171996894, 2.0371575405775131, 3.9246866565608016, 3.7520981994306206, 2.4969867268088883, 2.6123414796465414, 3.0915443487262912, 4.1408659000998265, 5.7820460679557808, 6.8071135430916554, 5.4242244980959073, 1.7156801123969387),
(0.0, 0.0, 0.0013995801259622112, 0.22688748930876293, 1.8015706399191354, 13.106601352927456, 60.709820387217171, 166.87722572117255, 231.19042065158231, 193.86781743254801, 94.039032734624058, 26.930176502604773, 16.39693647461317, 50.754140424539308, 117.16732757950393, 171.91796905372831, 195.85102247103646, 210.31459451053573, 231.91353704999611, 233.38573983360547, 179.20410543503615, 91.674753129616676, 33.054039343752429, 14.293834072000623, 8.9154809112821702, 5.8532773501282946, 2.6319104268719382, 0.2430604152087707, 0.0, 0.0, 0.057227276261565978, 0.38877225721172537, 2.0880180390327348, 14.005131793795195, 67.49957235051707, 177.4678485343286, 230.29344529974341, 178.57413886945028, 82.06375865018272, 38.500816421740147, 50.382785164450667, 101.65873571261955, 175.73734546302776, 224.81486665111578, 233.37874193297566, 227.7811212191898, 215.10527952725295, 164.84106990125184, 84.315294300598708, 25.14392348961978, 5.311717595832361, 2.9686649560687348, 2.8398258300287691, 1.8463572039499261, 0.64948293289790837, 0.025347951170204493),
(0.0, 0.21847603599428417, 2.6875989649712277, 5.6038697717529837, 6.6753948943729968, 5.3715289846676706, 4.7503186189317574, 5.6868651759162709, 5.899200556134863, 6.1164407368786931, 5.285289460471942, 4.2294442513420618, 3.246823465801568, 1.9176997644150928, 1.5590700189240336, 2.6058780365349707, 4.4089136059938978, 18.206812652068127, 70.835747113119382, 142.90248329664388, 164.61352489089717, 102.62793032866026, 24.97172981114587, 2.0882091685011392, 0.79384389603367711, 0.16687907928783841, 0.059668636310972079, 0.0, 0.0, 0.12273587456069208, 1.6708762986135248, 4.1986637315104467, 5.437956204379562, 5.0450314756884103, 5.4850731858031132, 6.0019310238288339, 5.7767350249102076, 5.7844977407021201, 5.742903487429035, 6.0971691190669297, 6.6030201212682966, 6.0473873247595877, 5.9780249488278683, 8.0547252153091566, 14.801027304676939, 41.024910207391962, 106.52346193952033, 188.62870273819178, 216.63190823774767, 152.42443903757771, 56.0081103000811, 8.1325454756111686, 1.235198702351987, 0.29100529100529099, 0.068937550689375501, 0.0),
(0.0, 0.0017447439588240425, 0.089767076681496985, 1.4723021896536683, 18.318851958475094, 87.20361161999476, 203.64834685509902, 215.31623484253686, 122.9138096484341, 39.286050772049201, 13.106952804675913, 7.9417255517752769, 16.965977492802931, 59.482945127802495, 145.82255953938758, 220.5526476489575, 219.86068219488789, 156.63944866090901, 86.10712727907179, 33.344499694669807, 12.135828317194452, 7.3006193841053824, 6.7564337433481638, 11.462880572276019, 13.199773183285354, 4.1092209718223849, 0.081130594085317975, 0.0, 0.0, 0.017447439588240427, 0.24408967983948354, 2.6644857367181367, 21.25769868271831, 90.209718223850643, 203.98700165750677, 196.20745005670418, 94.917822559539388, 25.424757916775715, 13.330716217395098, 18.944429904911456, 44.319637093256567, 100.65837913286225, 177.96449446043792, 224.37599232312658, 190.82639797609701, 103.99520195411323, 34.642414725639014, 10.228561458605949, 7.3946610834859987, 6.9911890430079389, 9.7259007240687421, 16.973479891825875, 18.216958911279768, 6.3181540608915645, 0.045974003315013522, 0.0),
(0.0, 0.0, 0.0023993493289955268, 0.092476616510776738, 1.8283855225701504, 32.499349328995528, 141.19036193574624, 228.07019113460757, 191.28922326148842, 89.72871085807239, 31.142374949166328, 10.714111427409517, 5.2737698251321676, 4.0483936559577067, 5.8175274501830012, 7.3869459129727533, 7.6546563643757626, 8.1972346482309888, 9.8058153721024812, 8.9010980073200496, 7.2588450589670597, 5.0286295241968277, 3.1214314762098412, 4.0373322488816594, 5.056283041886946, 2.3274095160634403, 0.19227328182187881, 0.0, 0.0, 0.00052867019113460756, 0.013054087027246848, 0.11525010166734445, 3.2456283041886946, 34.455754371695811, 131.24058560390404, 196.01830012200082, 133.78251321675478, 33.235054900366002, 4.4598210654737702, 2.6938186254575029, 2.617568117120781, 3.2232614884099227, 5.33623424156161, 5.4596583977226514, 4.2779991866612441, 3.9378202521350141, 5.420658804392029, 5.5565677104514029, 5.7223668157787717, 5.5389182594550634, 5.639080927206181, 8.5952419682797885, 9.965189101260675, 5.9835705571370479, 1.2113054087027246, 0.0),
(0.0, 0.48710376058279187, 5.5372448644746344, 9.1354597361685368, 9.5403950908971584, 7.2717070289427053, 5.1947890004594077, 4.4049353547286207, 3.9807048631620399, 4.4963575506989564, 8.1690621513421284, 25.122596311609897, 76.608124958981421, 151.66679792610094, 205.56540001312595, 218.51978735971647, 211.37297368248343, 213.1867165452517, 227.09385049550437, 226.61875697315745, 187.43847214018507, 98.784865787228455, 24.862177594014568, 6.6306359519590474, 2.0007875566056312, 0.50127977948415048, 0.097591389381111768, 0.00091881603990286804, 0.0, 0.21775940145697972, 1.6668635558180744, 2.5667126074686619, 2.8290345868609306, 2.6962000393778305, 2.2014832316072717, 2.5178184682024018, 3.4752904114983263, 8.6219071995799705, 27.806983001903262, 75.648552864737155, 149.07337402375796, 215.17943164664959, 242.23285423639825, 236.83631948546301, 221.89440178512831, 219.26100938504956, 228.42567434534357, 224.51204305309443, 180.05965741287656, 95.683861652556274, 21.980442344293497, 2.5266784800157511, 0.61567237645205752, 0.2067336089781453, 0.045743912843735642, 0.0),
(0.0, 0.0, 0.012612739571589628, 1.3792277339346111, 16.469419391206312, 64.233089064261563, 146.33976888387824, 217.3156003382187, 234.55383314543406, 187.9472237880496, 103.58998027057497, 38.218503382187144, 10.505777903043969, 3.5294532130777903, 1.4713923337091319, 2.7177987598647126, 4.3383596392333708, 6.8302564825253667, 20.936724915445321, 79.870419954904165, 185.09928128523111, 239.93284949267192, 206.22132187147687, 94.152621195039458, 14.160794813979708, 1.4006482525366404, 0.17319616685456596, 0.0, 0.0, 0.0, 0.035442502818489291, 1.5350901916572717, 20.80228297632469, 80.405651071025929, 167.86950394588501, 225.82694475760991, 227.30770856820743, 158.7689543404735, 65.388035512965047, 17.268108793686583, 6.5341741826381057, 4.680876550169109, 3.7568348365276214, 4.307144870349493, 5.4983089064261552, 11.217235062006765, 38.391206313416006, 111.60562288613303, 206.5968151071026, 240.39050169109356, 184.46589627959415, 70.84061443066517, 7.981397970687711, 0.35132468996617811, 0.021420518602029311, 0.0),
(0.0, 0.013982158965620928, 0.12438608800240553, 0.34734890247569411, 1.0733186328555677, 1.7650596371654805, 1.5783301593665431, 1.5990778791219806, 1.9397113360729679, 2.1898366242357423, 2.5580835922622032, 7.0069660218502552, 31.27127393003909, 68.963967124386087, 75.693795730179417, 45.064348000400919, 15.020948180815877, 4.6817179512879621, 2.4978450436002806, 1.529768467475193, 0.96101032374461259, 1.3898967625538738, 2.6531522501754035, 3.971283953092112, 3.9729377568407336, 2.9342988874411144, 0.95715144833116172, 0.01648792222110855, 0.0, 0.021750025057632554, 0.22782399518893454, 0.51413250476095018, 2.9868196852761351, 5.8076074972436604, 7.9025759246266416, 10.530520196451839, 15.25293174300892, 25.455547759847651, 49.480705622932746, 96.130800841936448, 159.35651999599077, 208.5472586949985, 207.08439410644482, 155.09642177007117, 94.946927934248777, 54.619424676756537, 32.192893655407438, 19.072917710734689, 11.373108148742107, 8.9054826100030073, 9.8258995690087207, 11.75904580535231, 11.18617820988273, 7.3915505663024961, 2.3809261300992284, 0.087200561290969231),
(0.0, 0.019965735371639429, 0.81016078017923032, 7.7473642593568792, 67.50349235635214, 157.73800738007381, 210.0062598840274, 221.23517395888246, 212.77899314707432, 205.23036373220876, 210.04487348444914, 218.3141143911439, 222.12809699525567, 222.70519240906694, 224.26660516605165, 223.12394570374275, 216.42797838692672, 208.7832762256194, 208.21303373748023, 216.13804691618344, 222.27583025830259, 207.87203479177649, 158.81352134949921, 88.869662625197677, 18.365181866104376, 0.35556141275698472, 0.0, 0.0, 0.0, 0.033737480231945179, 2.08875856615709, 10.99934106483922, 73.222127042699, 165.77365577227201, 222.09317343173433, 233.45275434897206, 227.06675013178705, 220.7167237743806, 219.50296520822351, 218.59916974169741, 216.41045071164999, 216.71263837638375, 220.35114654717975, 222.15175276752768, 219.05376910911966, 214.89562467053241, 216.05864522930943, 222.01937269372695, 221.61729045861887, 198.81371903004745, 146.62770163415919, 80.910648392198212, 15.881787032156035, 0.16361360042171849, 0.0, 0.0),
(0.0, 0.0, 0.034266692774623068, 0.48971999216761308, 4.3917172508321913, 25.534560407284118, 94.758860387703152, 190.09712159780693, 223.32631682005092, 168.73604856079891, 88.686900332876448, 46.156745643234778, 72.137654200117481, 163.00088114352849, 215.68210299588799, 157.62825533581358, 62.643332680634423, 19.699334247111807, 29.977286077932249, 93.789602506363821, 188.59330330918348, 225.50401409829647, 165.14773839827689, 65.416193459956915, 19.573330722537694, 6.181123947523008, 0.95760720579596637, 0.0, 0.0, 0.0, 0.34981398081065207, 1.9885451341296261, 10.041609555512043, 42.225279028784023, 125.01468572547483, 208.26169962796163, 203.04043469747407, 113.66183669473271, 37.623555903661639, 21.197767769727825, 66.213922067750147, 165.07078519678873, 221.71020168396319, 169.89044448795769, 84.015273154493826, 57.083708635206577, 88.064323477579791, 155.97552379087526, 219.25974153123164, 209.0641276679068, 119.08831016252203, 31.29723908361073, 6.6709418445271194, 1.7672802036420598, 0.28686117094184455, 0.0),
(0.0, 0.48879053208890599, 4.8487443471567397, 7.4554026748773214, 6.6338881939767154, 5.409843163667853, 5.3586067545463294, 6.2449244683921874, 6.7000384874434715, 6.2836524583854514, 5.6517848551909937, 3.834600211680939, 2.4026267680169346, 5.1123352256326378, 28.947608967574329, 92.732079284133548, 166.45641297026845, 203.29837390551333, 208.06182045607619, 220.20561916674686, 219.67391513518714, 158.64100837101896, 65.608438371981137, 14.893678437409795, 3.0144327913018376, 0.40127970749542963, 0.041951313384008468, 0.012652747041277784, 0.0, 0.18372943327239488, 2.5184258635620127, 4.0280477244299044, 4.4112864427980369, 5.1954199942268833, 6.0756759357259691, 6.6541903204079667, 6.2811026652554602, 5.5911671317232754, 4.8347445395939577, 3.6521697296257094, 4.8122774944674296, 16.858221880111614, 60.820215529683438, 140.85403637063408, 208.43817954392381, 225.46507264504956, 217.15481574136439, 221.27961127682093, 208.7824978350813, 139.87231790628309, 51.32478591359569, 7.255845280477244, 0.85052439141730007, 0.17405946310016357, 0.018955065909746945, 0.0),
(0.0, 0.012334008552779654, 0.22732388026108485, 0.71334683772225971, 1.3402655863155526, 1.6437542201215396, 1.9196038712581589, 4.5064595993697951, 20.720234076074725, 65.517578212919204, 126.90947557956336, 185.2602745892415, 221.72482556830971, 233.5873058744092, 235.90056268287194, 211.32509565608822, 132.39009678145396, 47.862975467026786, 12.870762997974342, 4.3692099932478055, 2.7613324330407383, 2.1932928201665542, 2.1291469727661489, 4.8209768174656764, 6.5411658789106459, 4.7115462525320728, 1.6113887013279315, 0.086383074499212245, 0.0, 0.014719783929777177, 0.14706279540850775, 0.52851676794958358, 1.2140445644834572, 2.2483006977267612, 4.822282241728562, 14.759261760072024, 53.243754220121538, 127.90020256583389, 194.02984469952736, 222.5146972766149, 214.30853027233852, 200.58203916272788, 201.10519918973665, 177.95557056043214, 100.1185685347738, 26.429979743416609, 4.9772676119738914, 2.8001350438892638, 2.999054692775152, 4.0474004051316674, 7.1529146972766151, 13.000270087778528, 15.008012604096331, 10.579563358091379, 3.3668242178708079, 0.10632455548053117),
(0.0, 0.022144420131291028, 0.054792122538293214, 0.42319474835886217, 6.3781181619256015, 54.110547045951861, 161.8940043763676, 212.12218818380745, 137.26757111597374, 49.16901531728665, 24.830459518599561, 60.079999999999998, 150.07623632385119, 219.85864332603938, 190.88708971553609, 103.88, 42.009452954048143, 19.614617067833699, 14.985295404814005, 15.321312910284464, 14.869496717724289, 13.375142231947484, 10.431597374179431, 6.2130415754923414, 8.6800875273522973, 7.3699781181619253, 0.37207877461706784, 0.0, 0.0, 0.019868708971553611, 0.075973741794310726, 0.43921225382932166, 6.8505908096280086, 57.461094091903718, 163.77855579868708, 206.28595185995624, 129.96087527352299, 52.723063457330419, 54.33995623632385, 114.20630196936543, 190.47203501094091, 214.83789934354485, 147.3490590809628, 53.268533916849016, 11.879737417943106, 7.158074398249453, 9.0856017505470454, 11.020131291028447, 12.221181619256017, 13.446739606126915, 13.009277899343545, 11.99964989059081, 15.411553610503283, 11.528052516411378, 1.2499781181619256, 0.0),
(0.0, 0.0, 0.091417531014846454, 3.4065487085621315, 8.0940614195647758, 13.183953630262355, 16.453833638397398, 16.743949562741509, 19.719239373601791, 23.654667480170836, 21.60656904616636, 14.880008135041692, 11.243847874720357, 32.707545251169414, 100.86729713239781, 180.52043929225138, 183.11531421598536, 104.92993695342689, 40.267032743542813, 20.814114297335774, 49.817876754118366, 125.60006101281269, 178.67063249949155, 141.376855806386, 57.256863941427703, 9.0735204392922508, 0.53548911938173682, 0.018710595891803947, 0.0, 0.0, 0.051962578808216392, 3.0835875533862112, 8.329977628635346, 14.057860484034981, 16.605043725849097, 17.39363432987594, 22.249847467968273, 27.184563758389263, 24.49440715883669, 20.772828960748424, 27.969493593654668, 57.599247508643479, 124.23591620907057, 203.04647142566606, 202.89943054708155, 125.16849705104738, 72.6050437258491, 75.487594061419571, 121.1241610738255, 189.47793369941022, 216.85133211307709, 155.80079316656497, 64.275879601382954, 10.587146634126499, 0.70378279438682123, 0.022879804758999391),
(0.024039371569184176, 0.63193261404505019, 9.7550634109407532, 18.087450312322545, 21.573443119439712, 19.661366647738028, 15.448703388226386, 13.829452962332008, 14.12615937914064, 22.075146696952491, 61.679348854817341, 145.90772288472459, 211.15341661934508, 190.0101268218815, 106.07713420405072, 38.574578837781566, 26.687204240015141, 73.584894946053382, 173.17471133825478, 225.87384062085937, 171.75108839674428, 67.80323679727428, 13.283361726291879, 5.2531705470376684, 3.9086693166761308, 1.1950596252129473, 0.18057921635434412, 0.00028392958546280523, 0.0, 0.46261593791406397, 5.6662880938860498, 10.50596252129472, 13.277209918606852, 12.645182661366647, 11.385860306643952, 11.514669695248912, 14.510032178686352, 34.580352072685976, 91.607230740109785, 170.30437251561614, 211.25156161272005, 172.17187204240014, 89.768975960628424, 40.332576187772098, 53.959114139693355, 120.88794245693735, 202.90469430247964, 210.71039182282794, 129.8427030096536, 41.195438197993568, 7.4281658148779099, 5.0443876585273522, 3.9037478705281092, 0.98845353019117921, 0.13325761877720993, 0.0),
(0.0, 0.0019397651863195507, 3.5357835630423686, 36.178050025523227, 131.62092904543135, 216.14129657988769, 201.77080142930066, 113.12863705972435, 36.050535987748852, 15.12475752935171, 43.32108218478816, 124.30107197549771, 212.38417559979581, 227.23256763654925, 168.72220520673812, 82.591015824400202, 36.427156712608472, 31.361613067891781, 64.151710056151103, 149.23481368044921, 226.57723328228687, 215.35130168453293, 126.70781010719755, 26.191730474732005, 1.7182235834609494, 0.010005104645227157, 0.0, 0.0, 0.0, 0.0, 5.8885145482388976, 43.062582950484938, 139.57039305768248, 217.00826952526799, 193.75028075548749, 102.95109749872384, 36.602041858090864, 38.81082184788157, 88.669014803471157, 165.69321082184788, 225.68361408882083, 221.10199081163859, 154.31516079632465, 70.220214395099546, 35.951914241960182, 43.732924961715163, 92.417457886676871, 179.84114344053089, 229.08912710566617, 189.48933129147525, 89.370597243491574, 14.632669729453802, 0.99530372639101583, 0.019499744767738643, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.098265429608713192, 8.4017749092375951, 61.696006454215407, 132.88212989108513, 181.87107704719645, 202.06994755949981, 207.82097620008068, 210.84292053247276, 214.31746672045179, 218.43186768858411, 226.07866075030253, 214.7457845905607, 155.23372327551431, 81.508108108108104, 33.890359015732152, 14.480274304154902, 13.684953610326744, 46.290520371117388, 129.25211778943122, 211.23977410246067, 212.33521581282776, 110.52650262202501, 16.006857603872529, 1.0529245663574023, 0.003307785397337636, 0.0, 0.0, 0.0, 0.086486486486486491, 7.496167809600645, 62.826462283178699, 149.65728116175876, 211.57216619604679, 230.02936668011296, 218.59870915691812, 197.38289632916499, 182.08261395724082, 187.47357805566762, 209.669060104881, 212.03146430012103, 158.43678902783381, 84.025897539330373, 33.090681726502623, 14.94860830980234, 22.695280354981847, 68.675675675675677, 155.45308592174263, 225.83251311012506, 211.76127470754335, 103.69891085114966, 15.209842678499395, 1.0073416700282372, 0.0049213392496974583),
(0.0, 0.0, 0.0, 1.0616155988857938, 2.9239554317548748, 4.3044568245125348, 5.2937604456824516, 6.4953203342618382, 5.8044568245125348, 4.9661281337047356, 4.9511977715877435, 4.5488579387186627, 4.0150417827298046, 3.4672980501392758, 3.8183286908077996, 7.8236768802228411, 27.029526462395545, 75.838718662952644, 139.67643454038998, 191.51949860724233, 215.89025069637884, 228.11498607242339, 220.60635097493036, 149.54941504178274, 43.565515320334264, 3.9044568245125348, 0.52997214484679667, 0.032869080779944292, 0.0, 0.0, 0.0, 1.3496378830083566, 3.9464623955431755, 6.2072423398328693, 8.7100835654596107, 10.89242339832869, 11.583454038997214, 12.448635097493037, 13.377548746518105, 12.654428969359332, 10.823286908077995, 9.5040111420612821, 12.411476323119777, 28.033203342618386, 72.975153203342614, 144.96484679665738, 203.81119777158776, 221.41253481894151, 215.51604456824512, 219.65961002785517, 215.90924791086351, 148.5225626740947, 43.2825069637883, 3.2546518105849582, 0.39598885793871869, 0.014651810584958217),
(0.0, 0.0, 0.17964026602176542, 4.24886638452237, 36.064918379685608, 125.5983222490931, 218.45843409915358, 226.38701632406287, 160.63731862152358, 77.972188633615474, 29.828521765417172, 12.834492140266022, 11.756121523579202, 32.672687424425632, 87.235187424425632, 160.04882103990326, 204.91747279322854, 206.57927750906893, 189.4849607013301, 193.350438331318, 218.55781438935912, 233.83441656590085, 193.9375, 107.71531136638453, 39.692790205562275, 9.6001360338573161, 0.14759673518742442, 0.0, 0.0, 0.0, 1.0267533252720678, 9.8004081015719464, 52.871296856106412, 148.48972188633616, 227.4387091898428, 215.13769649334947, 131.26783555018139, 48.287636033857318, 14.971735187424425, 9.8164298669891181, 16.087892986698911, 44.471584038694076, 106.83819528415961, 180.23662333736397, 222.55864570737606, 223.9402206771463, 208.59824667472793, 210.71372430471584, 224.74130894800484, 217.64691656590085, 155.76685308343409, 75.517382103990329, 24.315220677146311, 4.0600816203143895, 0.0052146311970979447, 0.0),
(0.0, 0.13387053451517594, 0.64297609454740801, 3.1790491539081387, 20.905882352941177, 59.352242814934193, 127.0262691377921, 190.18474348643568, 208.84378189632017, 194.54144507117917, 181.68611334944936, 188.79666935267258, 207.90566747246845, 224.95621810367982, 234.8299758259468, 237.69105560032233, 221.22965350523771, 164.75073865162503, 89.718184260005373, 34.976846629062585, 13.037818963201719, 7.3059897931775453, 5.4665592264302978, 5.3451517593338709, 4.6793983346763364, 2.3383293043244695, 0.37190437818963201, 0.0068761751275852809, 0.0, 0.11437013161428955, 0.9859790491539081, 3.6278270212194466, 21.229492344883159, 61.166424926134837, 130.49170024174055, 201.63142626913779, 231.24501745903842, 230.32172978780554, 226.87128659683052, 229.34955680902499, 228.68745635240398, 226.01611603545527, 224.13951114692452, 210.40220252484556, 166.16175127585279, 94.702981466559223, 36.519634703196346, 11.741821112006447, 5.9394037066881546, 4.6139672307279076, 3.9529948965887725, 4.4658071447757184, 4.1315605694332529, 1.6970185334407735, 0.16782164920762827, 0.0),
(0.0, 0.025091369131290415, 0.49156592634242341, 4.0638881079561431, 7.7974416643238689, 6.8373629463030641, 6.5796317121169521, 21.373770030924938, 73.83736294630306, 159.81859713241496, 220.72884453190892, 217.1232780432949, 165.97357323587292, 103.1642535844813, 57.022069159403991, 29.267149283103738, 16.915659263424235, 23.584832724205793, 63.893519820073095, 150.84980320494799, 223.9063817824009, 206.51996064098961, 108.80179926904695, 30.214014619061007, 8.2582935057632838, 1.8727157717177396, 0.4793365195389373, 0.10619904413831881, 0.0, 0.0073095305032330618, 0.41861118920438573, 4.07808546527973, 7.806438009558617, 7.4143238684284514, 10.454526286196232, 45.067332021366319, 125.18330053415799, 203.30805454034299, 226.94595164464437, 188.52593477649705, 122.52270171492832, 68.774739949395553, 38.85261456283385, 22.729125667697499, 18.253233061568739, 36.521436603879671, 90.088417205510268, 176.10402024177677, 229.81009277481024, 193.59066629181896, 88.585886983412991, 20.181402867585042, 4.5281135788585889, 0.65701433792521791, 0.10619904413831881, 0.0078015181332583639),
(0.0, 0.0, 0.0, 0.039261805681201377, 20.74787407273385, 149.47367468789579, 237.34774742174778, 220.69169531391353, 118.0753573367107, 34.151076533381584, 21.711597611724262, 61.522344852542069, 152.76650986068393, 224.50832277908449, 233.20218925275918, 192.39026596707075, 118.1461009589289, 61.898136421204995, 34.719829925818708, 31.492220010855799, 77.773204269947527, 182.55717387371087, 240.93803148181652, 204.53681925094989, 64.047584584765701, 2.4395693866473676, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10213497376515289, 26.128007960919124, 166.44988239551293, 241.42663289307038, 212.76877148543514, 98.775918219649, 25.173149990953501, 29.001628369820878, 83.413877329473493, 173.21964899583861, 228.79799167722092, 232.54695132983537, 192.82196489958386, 122.20128460285869, 68.64619142391895, 44.717115976117242, 51.403111995657682, 111.64863397865027, 206.3397865026235, 241.91921476388637, 182.99737651528858, 49.604848923466619, 2.4886014112538448, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.60773700549231868, 8.3835867229165011, 27.669028098384143, 61.646183236488099, 115.5645148451803, 175.34553848603042, 217.3344742497811, 228.48515481970867, 209.5400780068455, 155.74647775212927, 81.497731433574785, 25.827827748149328, 9.7481493273899549, 6.835548833877259, 5.8391307808644433, 6.8256785799570165, 16.942370452917295, 68.324524397038928, 175.24850752208869, 234.33511104035659, 182.71567300803949, 50.001114383507122, 5.6283531003741141, 0.87701982010666246, 0.046246915545649928, 0.0, 0.0, 0.0, 0.56053490408341955, 8.6715752606861418, 35.085170739473057, 92.033351906391786, 166.99012974607976, 216.48618960439384, 223.1662819390273, 201.95048953275491, 161.6557350951206, 108.91275969115657, 54.194698718458966, 14.68964419326594, 5.2520098702539206, 4.8740746636949774, 4.4778317280904245, 5.9152272546366316, 19.714081031600731, 79.864761601528301, 184.65716787391545, 236.19071877736209, 175.53585926928281, 44.630104274456741, 2.3238080076414871, 0.087160710021491686, 0.014248189126800924),
(0.0, 0.0, 0.0, 0.85167359439947499, 1.9561729745496974, 1.6340698607161088, 2.115510829140232, 3.6828556843870781, 9.5029534018814257, 35.888281193028511, 96.107635090789756, 169.05746372055714, 217.52483045285496, 221.0625683657843, 185.21585356960549, 123.76336323196966, 66.373514183621381, 30.742361263035075, 18.557864799824983, 28.124845037555605, 85.229052723692845, 175.91278349011887, 217.88995843360314, 158.70159702472108, 57.325822212499091, 18.103988915627507, 2.9413695033909431, 0.059797272660978631, 0.0, 0.0, 0.0, 0.80799241595566251, 2.4414788886458108, 2.8429227740100633, 4.3851819441405961, 11.295048494129658, 41.608036170057609, 105.9509954058193, 175.11784438124408, 216.07314227375483, 219.94610953110188, 198.67913658572158, 156.05607817399547, 94.489535477284335, 42.080142930066359, 16.333333333333332, 13.210238459855612, 32.795814190913731, 102.24115802523153, 196.17844381244075, 229.63253846714795, 159.83883905782835, 52.800772989134401, 11.876102967986583, 1.3044556260482754, 0.037482680667979287),
(0.0, 0.0, 0.0012428977272727273, 0.43039772727272729, 2.8014914772727271, 12.236150568181818, 42.892311789772727, 105.45303622159091, 181.31125710227272, 225.53657670454547, 214.14390980113637, 149.94176136363637, 73.1796875, 28.796431107954547, 32.3447265625, 81.642489346590907, 148.2548828125, 190.18235085227272, 198.82652698863637, 204.94060724431819, 221.75417258522728, 222.08877840909091, 170.00372869318181, 96.547762784090907, 36.246981534090907, 10.552468039772727, 3.3106356534090908, 0.63272372159090906, 0.0, 0.0, 0.080699573863636367, 0.59312855113636365, 3.6375177556818183, 17.176580255681817, 59.758167613636367, 137.93226207386363, 208.74405184659091, 231.06667258522728, 195.07901278409091, 113.94264914772727, 51.487837357954547, 28.136541193181817, 45.8759765625, 113.62553267045455, 191.89923650568181, 224.79607599431819, 221.50967684659091, 218.64018110795453, 217.76509232954547, 189.12606534090909, 119.83575994318181, 56.670010653409093, 17.08895596590909, 2.265625, 0.58629261363636365, 0.18394886363636365),
(0.0, 0.0, 0.43093922651933703, 26.618892861011808, 151.06023182753765, 224.17343733073341, 169.99447513812154, 66.106164012566353, 18.329650092081032, 8.673816487921135, 7.4348391290217748, 8.6753331166720837, 32.244935543278082, 114.42736431589211, 194.89546094680966, 196.44545553027842, 123.06337341566461, 54.833170837395734, 23.500270826562669, 31.680099664175064, 103.40320658650201, 194.54598635034125, 197.88993608493121, 84.879103022424445, 1.7166070848228794, 0.0, 0.0, 0.0, 0.0, 0.0, 1.807604809879753, 37.092406023182754, 169.82212111363881, 223.13530495070955, 147.96652583685409, 40.818329541761457, 7.6655833604159893, 6.1182970425739356, 7.3763405914852127, 14.761239302350775, 54.133571660708483, 139.46690499404181, 209.10627234319142, 197.57761889286101, 111.07810638067382, 42.688657783555414, 21.047990466904995, 43.919727006824829, 121.27418481204637, 197.94312642183945, 183.09543928068464, 73.42476438089048, 1.3528328458455205, 0.0, 0.0, 0.0),
(0.00061791967044284239, 1.5728801922416753, 36.523377960865091, 141.64991417782355, 229.83501544799176, 216.10882251973911, 131.18688637143839, 52.329488499828358, 18.832337796086509, 7.8177823549605217, 4.7153450051493309, 4.8602128389975974, 13.45849639546859, 45.267353244078272, 104.39917610710607, 160.64909028492963, 184.81853759011329, 186.10175077239958, 195.90744936491589, 206.29982835564709, 180.33807071747339, 108.56491589426707, 28.395193958118778, 1.4736697562650189, 0.0, 0.0, 0.0, 0.0, 0.0079642979745966364, 2.8279437006522485, 52.541709577754894, 158.29220734637829, 221.19649845520081, 178.58633710950909, 82.196223824236185, 18.532715413662892, 4.4473738414006183, 2.748094747682801, 3.4742190181943013, 7.3273601098523855, 23.654720219704771, 64.911294198420876, 126.89543426021284, 176.31136285616202, 191.96615173360797, 188.84270511500171, 193.47614143494678, 192.10401647785787, 157.29893580501201, 88.71644352900789, 22.262753175420528, 1.4584277377274288, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.2486336858542125, 11.406132443750444, 66.15331109376109, 142.03172687912556, 187.21314500674285, 192.44978351905743, 182.77883455177798, 180.56100503939243, 190.44800908510186, 201.8378877138193, 204.55979842430264, 203.49279579814041, 206.61090212222302, 212.58229824685924, 214.51657321314499, 211.63510540137696, 210.42217332670879, 217.49272482078217, 229.65100432961884, 225.53396266590957, 176.69422954077649, 85.553836326211936, 25.280928383845552, 1.9095748456242458, 0.0, 0.0, 0.0, 0.0, 0.80424444602171907, 16.964937185037972, 76.781034849882886, 154.12740435800978, 198.78359003477891, 205.64937185037974, 199.23287671232876, 200.89417275888991, 211.80005678188658, 223.30136986301369, 225.72340123500604, 224.56938036766272, 226.56150188089998, 227.63595712967563, 220.83291929874369, 206.79515934416921, 198.68599616722267, 201.99510256228263, 212.70423734828589, 207.90524522677265, 159.88331322308184, 70.296543402654549, 19.164312584285614, 1.3272056214067713),
(0.0, 0.0, 0.049518470605145894, 0.15581428776771597, 0.75334195774040535, 4.5756073019979873, 25.042834555124337, 113.37789276987208, 209.85022279718268, 214.36617795026592, 139.78654592496767, 71.16415121460399, 45.124982032485264, 81.435173206842023, 179.9754923099037, 228.39866321690383, 156.75937904269082, 59.64237458674716, 18.737386804657181, 6.3602127353744429, 4.8012074169900822, 6.0038809831824063, 6.9439413540319102, 8.2206410809256862, 8.8608595659048444, 5.5426189449475345, 1.3344832542762686, 0.022710938622969672, 0.0, 0.0, 0.026304441569642088, 0.13626563173781803, 1.0062526951272099, 6.8132815868909011, 35.236596234008914, 137.33922667816589, 212.25204829667959, 173.60758947822336, 73.933951415840156, 22.674931723444015, 31.979804513439699, 86.693042978295239, 183.93876670978869, 219.41224665804225, 132.70303291648699, 33.020267356619236, 5.6438119879258304, 4.7874083656748603, 6.6782377461549522, 9.9606152077044712, 14.50898375736668, 20.070504527813714, 22.92978295242202, 16.848210435532557, 5.9326577547793589, 0.067557855397441427),
(0.0, 0.0, 0.0, 0.59366875669062547, 2.5719528979966357, 7.2177703012693071, 11.192995870928277, 11.09068664933476, 9.1574399755314264, 8.3161798440128454, 10.295458021104144, 18.795305092521794, 44.28437069888362, 93.163174797369635, 151.08235204159658, 191.19291940663709, 207.32229698730694, 212.50756996482642, 216.98203089157363, 223.54870775347914, 231.71088851506346, 236.98042514145894, 225.50175867869706, 165.1871845847989, 67.807998164857011, 16.075852576846614, 2.2670897690778404, 0.16699801192842942, 0.0, 0.0, 0.0, 0.19888362134883009, 3.5890808992200642, 9.1163021868787268, 13.583728398837742, 15.004129071723504, 17.090916042208288, 23.775653769689555, 41.485242391803027, 76.434087781006269, 126.23772748126625, 179.75447316103379, 221.9779782841413, 237.52278635877045, 235.44693378192383, 225.94815721058265, 213.16615690472548, 205.24139776724269, 210.92139470867104, 219.70438905031349, 212.66317479736963, 154.53027985930569, 57.031426823673343, 12.169980119284293, 1.8649640617831473, 0.12104297293164093),
(0.017536025979297747, 0.2741627765374467, 2.011812461944388, 4.8636898721331443, 6.4961639943170288, 6.8503348893850209, 8.0168459508828906, 7.5926527298558959, 7.2203775116703879, 8.1227115892023551, 7.3110614978688861, 5.937730870712401, 4.4335701238075904, 3.9024152628374265, 6.0135173533590418, 24.856180231378122, 90.564278465597724, 156.79354576821595, 154.93720316622691, 95.127623300182663, 32.799106961639943, 8.9473107367566467, 4.7901359853866454, 4.7137812056017863, 3.2733103308301197, 0.5513700020296326, 0.082606048305256752, 0.0, 0.0091333468642175765, 0.14243961842906433, 1.2474528110412015, 3.0297138217982544, 3.8136391313172315, 3.6537446722143292, 4.682930789527096, 4.1398822813070835, 3.790460726608484, 4.8452202151410591, 4.8365739801096002, 4.237142277247818, 4.1622488329612342, 7.1402882078343817, 16.196021920032475, 44.142196062512689, 115.37467018469657, 178.09376902780596, 167.65601786076721, 101.51743454434747, 36.300426222853666, 9.8000000000000007, 6.2095798660442458, 6.3337121980921456, 4.1048102293484883, 0.8332859752384818, 0.10168459508828902, 0.0),
(0.0, 0.0, 0.0, 4.3776686925354529, 116.9266791335515, 215.49571450833722, 174.94358734611188, 72.182484026803806, 19.37260402057036, 18.628720585943586, 65.538101916783546, 158.93595137914915, 218.30356864578462, 181.1052672588437, 85.057659342371821, 22.064983637213651, 6.9821567710768271, 5.1600436340969305, 5.2254947794919744, 7.529764687548699, 42.903303724481844, 153.80247779336139, 231.86753934860528, 168.16316035530622, 13.693548387096774, 0.03171263830450366, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5214274583138536, 122.44888577216769, 219.27388187626616, 173.71817048465016, 69.715521271622251, 24.283154121863799, 39.316425120772948, 98.316347202742719, 182.36029297179368, 223.44834034595604, 180.63620071684588, 84.044880785413739, 21.938522674146796, 7.3692535452703751, 4.9696119682094437, 4.6988468131525636, 9.4128097241701738, 58.096930029608849, 168.88062957768429, 230.3417484805984, 151.73835125448028, 9.5903849150693468, 0.001714196665108306, 0.0, 0.0),
(0.0073943661971830983, 0.64043427230046945, 7.2782863849765258, 12.015551643192488, 11.794072769953052, 9.436326291079812, 6.9514671361502351, 6.5227112676056338, 6.7653169014084504, 6.1940140845070424, 4.7075117370892015, 5.1042840375586858, 17.407159624413147, 64.320892018779347, 144.45510563380282, 204.36291079812207, 217.45346244131454, 224.83409624413144, 203.32828638497654, 132.94113849765259, 50.876408450704226, 11.748767605633804, 3.9881455399061032, 2.7182511737089201, 1.7245305164319249, 0.42212441314553989, 0.073004694835680756, 0.0, 0.028169014084507043, 0.41109154929577463, 3.3873826291079814, 5.5051643192488262, 5.9095657276995306, 5.6958920187793431, 4.4079812206572768, 3.7181924882629107, 3.4866197183098593, 3.3557511737089203, 3.4576877934272301, 6.9130281690140842, 28.525293427230046, 92.745187793427235, 175.17224178403757, 222.93086854460094, 227.37394366197182, 215.47963615023474, 153.61901408450703, 69.193309859154922, 15.777699530516433, 2.7780516431924882, 1.5072769953051643, 1.7411971830985915, 1.324530516431925, 0.3606220657276995, 0.047535211267605633, 0.0),
(0.0, 0.040891555936562365, 1.3885983711958851, 14.003171881697385, 72.057779682811827, 164.56510930132876, 230.2986712387484, 201.42983283326188, 111.00797256750964, 39.366223746249467, 17.425203600514358, 33.383026146592371, 91.110244320617227, 169.237205315045, 211.29507072438921, 207.39545649378482, 195.72001714530649, 203.56870981568795, 227.12207458208314, 222.49361337333906, 169.43840548649808, 93.466438062580366, 34.847835405057864, 11.752164594942135, 6.2824689241320186, 1.6990998714102015, 0.0, 0.0, 0.0, 0.096870981568795547, 3.3156450921560223, 19.350964423489071, 77.655979425632239, 170.18525503643377, 233.19417059579939, 195.74462066009431, 105.29712816116589, 50.912816116588083, 49.654779254179168, 82.427346763823408, 144.57993999142735, 209.14204886412344, 232.19982854693527, 219.95662237462494, 205.26069438491214, 208.56408058294042, 213.34033433347622, 178.46043720531503, 111.96433776253751, 47.708786969567079, 13.809515645092157, 5.5053579082726101, 3.6100300042863265, 0.41208744106300899, 0.0, 0.0),
(0.0, 0.0, 0.017241379310344827, 0.40782169890664421, 2.0997476871320435, 13.076955424726661, 72.209671993271655, 181.91034482758621, 225.07687132043733, 156.07947855340623, 65.336417157275022, 22.73431455004205, 9.9101766190075686, 7.7025231286795623, 16.277964676198486, 78.39705634987385, 188.33288477712364, 223.18082422203531, 148.72481076534902, 61.53305298570227, 15.998906644238856, 7.0020185029436499, 6.5268292682926825, 7.3492010092514715, 7.431034482758621, 4.1803195962994115, 0.65811606391925992, 0.0053826745164003362, 0.0, 0.0, 0.035912531539108491, 0.4372582001682086, 2.3820016820857863, 14.75870479394449, 83.248191757779651, 190.6793944491169, 204.16509671993271, 111.71825063078217, 28.728511354079057, 7.7835155592935239, 6.3519764507989906, 9.5624053826745161, 27.501766190075696, 98.904121110176618, 196.40866274179984, 205.23397813288477, 110.71169049621531, 27.640117746005046, 7.9635828427249793, 9.4912531539108489, 12.277544154751892, 16.154415475189236, 16.107064760302777, 10.436164844407065, 2.2291841883936079, 0.015054667788057191),
(0.0, 0.0, 0.0, 0.0, 0.51274145279767847, 24.82570055318763, 118.01868141833681, 209.99337988573501, 211.04797315679696, 135.98449260905051, 63.757504307608599, 30.031558900879659, 27.438831957921465, 72.475197243130495, 162.34197877935975, 217.46948399383331, 192.24149814092681, 113.5505577219552, 51.157159698920829, 23.773465131041988, 23.548018500045345, 62.7752788609776, 151.09113992926453, 211.05885553641062, 172.13584837217738, 57.774371996009798, 5.462138387594087, 0.0, 0.0, 0.0, 0.0, 0.0, 0.66763398929899342, 32.661195248027568, 143.14310329191983, 222.76512197333818, 195.01405640700099, 101.15471116350776, 36.623378978870043, 17.685861975151902, 27.343067017321122, 83.81191620567698, 173.97224993198512, 224.35540038088328, 199.80375442096673, 127.3894985036728, 73.015688763943047, 58.244309422327014, 73.928811100027204, 123.83368096490433, 201.02457604062755, 232.55618028475561, 170.28357667543304, 58.04298539947402, 6.0381790151446451, 0.0),
(0.0, 0.19434025172959907, 2.5505126281570392, 5.0358839709927485, 6.8687171792948236, 8.452613153288322, 9.5339668250395935, 9.9661165291322824, 9.7064266066516627, 10.907726931732933, 14.616154038509627, 21.712928232058015, 38.468492123030757, 80.133533383345835, 157.62065516379096, 218.82987413520047, 182.50533466700008, 86.319621572059688, 28.886596649162289, 10.252646494957073, 4.1511627906976747, 2.5499291489539053, 2.1374927065099607, 2.9443610902725683, 2.7347253480036677, 1.1916729182295573, 0.21426189880803534, 0.015128782195548886, 0.0, 0.084396099024756185, 1.2809869133950154, 2.6208218721347003, 3.2448528798866385, 3.2134283570892723, 2.788613820121697, 2.5351337834458616, 1.9024339418187881, 2.1504126031507877, 2.7127198466283238, 3.7503959323164122, 13.657539384846212, 47.731516212386431, 108.78415437192632, 151.72768192048011, 111.07835292156372, 31.422730682670668, 2.7868633825122946, 2.3232058014503627, 2.8023255813953489, 3.2992831541218637, 4.3041176960906897, 5.7939068100358426, 4.7237225973159953, 1.7549804117696091, 0.28311244477786113, 0.0088355422188880553),
(0.0, 0.80314521525763993, 4.8637073388356011, 21.386013830024538, 94.005353557885343, 175.0616774481374, 210.26366272585324, 206.64108855677003, 199.45884452375643, 210.52241802364489, 227.8816640642427, 229.06981931742138, 209.22016506803479, 187.79399955387018, 175.76455498550078, 178.61911666294893, 197.4138969440107, 219.13941556993086, 214.18157483827792, 169.90999330805263, 99.571492304260545, 40.519629712246264, 17.739237118001338, 10.913450814186929, 4.2791657372295342, 0.5185143876868169, 0.019072049966540262, 0.0, 0.0, 1.1951817979031898, 8.798237787196074, 25.792549631942894, 91.44579522641088, 174.75741690832032, 218.1477805041267, 220.57060004461297, 212.99966540263216, 207.87731429846085, 191.27570823109525, 156.54784742360027, 116.63283515503011, 86.850100379210346, 77.713919250501903, 90.85333482043275, 123.29589560562124, 157.53033682801694, 162.79210350211912, 124.73968324782511, 65.344300691501232, 20.453379433415122, 5.878764220388133, 3.9884006245817534, 2.0198527771581531, 0.27370064688824447, 0.0, 0.0),
(0.0, 0.059018201875344733, 1.0813568670711529, 15.504044861187719, 74.75684868542011, 157.68716675859534, 208.18744254458539, 210.55800698657842, 199.57538150395294, 206.08282772568487, 225.18413311270453, 232.25381503952931, 206.84822577679719, 148.6138996138996, 78.871667585953304, 32.360176503033642, 19.944934730649017, 34.042103327817614, 84.683581540724404, 165.40917448060304, 227.04072439786725, 220.11831218974075, 149.22890237175952, 53.261445118587979, 6.8878470307041733, 0.29766501195072625, 0.0, 0.0, 0.0, 0.025923883066740209, 1.6802721088435375, 13.388858246001103, 68.348593491450629, 157.63191763191764, 221.00156278727707, 231.47085861371576, 222.0894465894466, 216.00597536311821, 211.99126677698106, 196.26539805111233, 160.96442360728074, 108.60700496414782, 54.125114910829197, 21.825519396947968, 16.402187902187901, 36.163081448795737, 91.665563522706378, 174.40770362198933, 227.16105901820188, 209.17503217503219, 130.05809891524177, 42.885089170803454, 5.4461298032726608, 0.13789299503585217, 0.0, 0.0),
(0.0, 0.033208636870775084, 0.32591717529697445, 1.6798582398109865, 7.2599593095753754, 28.426593161383476, 87.683533503970594, 173.47568418980114, 224.58856730327491, 197.67067007941196, 117.06983001903262, 48.528909890398374, 18.017457504758156, 9.0978539082496557, 5.3385837107042065, 6.3668044890726518, 23.034586860930631, 79.419570781649938, 172.30353744175363, 231.42948086893745, 214.89715823324801, 132.95346853055062, 43.706503904968173, 10.67191704403754, 3.8849511058607336, 1.5832512961869134, 0.55358666404147794, 0.050075474174706307, 0.0, 0.070551945921113079, 0.41976767080133887, 2.0494848067204829, 8.4233116755266781, 32.836319485463015, 97.862637002034518, 180.86867493601102, 218.73026186257138, 173.79339765045611, 84.419308262781385, 27.471615147338714, 11.548533175822012, 8.6699481525234621, 7.0231672901489794, 13.54308590929973, 51.149373236201349, 128.17713460654986, 204.83303799960623, 228.16387740368839, 177.15738006169192, 79.562971713591921, 15.730983789459868, 4.2015488613244081, 2.7634048697250115, 1.1045481393975192, 0.232132309509746, 0.020476471746406773),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
))
def encode_col_k_means(col):
    # Vector-quantise one feature column: return the index of the nearest
    # k-means centre, which serves as the discrete observation symbol.
    return np.argmin(((col - centers)**2).sum(axis=1))
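# Usage sketch (illustrative, not from the original script): `centers` above is
# assumed to be a 2-D NumPy array with one row per centre, so encoding a feature
# vector of matching dimension yields an integer symbol in range(len(centers)),
# e.g.:
#
#     frame = np.zeros(centers.shape[1])   # hypothetical all-zero feature frame
#     symbol = encode_col_k_means(frame)   # index of the closest centre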
# Parameters of the HMM.
# The emission probabilities (B below) come from statistic_model.py.
N = 41  # hidden states: 10 chains of 4 left-to-right sub-states, plus one shared state N-1
M = 60  # discrete observation symbols (one per k-means centre)

# Transition matrix. State N-1 mostly loops on itself (0.95) and occasionally
# enters the first sub-state of one of the 10 chains (0.005 each); within a chain,
# each sub-state stays with probability 6/7 and advances with probability 1/7,
# returning to state N-1 after the fourth sub-state.
A = np.zeros((N, N), dtype=np.float64)
A[N-1, N-1] = 0.95
for i in range(10):
    A[N-1, i] = 0.005
    A[i, i] = 6/7
    A[i, 10+i] = 1/7
    A[10+i, 10+i] = 6/7
    A[10+i, 20+i] = 1/7
    A[20+i, 20+i] = 6/7
    A[20+i, 30+i] = 1/7
    A[30+i, 30+i] = 6/7
    A[30+i, N-1] = 1/7
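# Sanity check (added illustration, not part of the original model): with the
# self-loop/advance probabilities above, every row of A sums to 1, so A is a
# valid stochastic matrix over the 41 states.
assert np.allclose(A.sum(axis=1), 1.0)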
B = (
(0.0334326989223, 0.000366888328365, 0.00197202476496, 0.0374684705343, 0.00476954826875, 0.016051364366, 0.000137583123137, 0.00302682870901, 0.00293510662692, 0.0011006649851, 0.00137583123137, 0.152029351066, 0.00073377665673, 0.00298096766797, 9.17220820913e-05, 0.0016051364366, 0.00201788580601, 0.000550332492548, 0.00366888328365, 0.00114652602614, 0.000642054574639, 0.0319192845678, 0.000183444164183, 0.000917220820913, 0.119376289842, 0.00706260032103, 0.0258197661087, 0.0184819995414, 0.000183444164183, 0.0968585186884, 0.00779637697776, 0.000458610410456, 0.0160055033249, 0.0102728731942, 0.000229305205228, 0.00243063517542, 0.043246961706, 0.000596193533593, 0.00609951845907, 0.0318734235267, 4.58610410456e-05, 0.0132079798211, 0.0529236413667, 0.00518229763816, 4.58610410456e-05, 4.58610410456e-05, 0.0106856225636, 0.000366888328365, 0.149461132768, 0.0254987388214, 0.0, 0.0228846594818, 0.000871359779867, 0.00123824810823, 4.58610410456e-05, 0.00105480394405, 0.0105480394405, 0.0073377665673, 0.00738362760835, 0.00325613391424),
(0.000990277277638, 0.000270075621174, 0.000180050414116, 0.00126035289881, 0.000180050414116, 0.000810226863522, 0.0238566798704, 0.00630176449406, 0.0, 0.0, 0.00639178970112, 0.00414115952467, 0.00135037810587, 0.0379906373785, 9.0025207058e-05, 0.000990277277638, 0.00126035289881, 0.0188152682751, 0.369823550594, 0.0017104789341, 9.0025207058e-05, 0.0443824270796, 9.0025207058e-05, 0.010893050054, 0.00459128555996, 0.00225063017645, 0.0244868563198, 0.00909254591286, 0.0010803024847, 0.0129636298163, 0.000270075621174, 0.00639178970112, 0.0147641339575, 0.000630176449406, 0.000540151242348, 0.000540151242348, 0.298793662225, 0.000990277277638, 0.00855239467051, 0.00198055455528, 9.0025207058e-05, 0.000630176449406, 0.0017104789341, 0.000630176449406, 0.0, 9.0025207058e-05, 0.00261073100468, 0.0, 0.0328592005762, 0.00360100828232, 9.0025207058e-05, 0.00144040331293, 0.00189052934822, 0.0, 0.000180050414116, 0.00162045372704, 0.00090025207058, 0.00153042851999, 0.000720201656464, 0.0296182931221),
(0.00139778158535, 0.000631256199838, 0.0286319776355, 0.000405807557039, 0.0050500495987, 0.0117233294256, 0.0376048336189, 0.00856704842637, 4.50897285598e-05, 0.0, 0.0348543601768, 0.000586166471278, 0.0101902786545, 0.0124898548111, 0.00198394805663, 0.000856704842637, 0.0484714582018, 0.0045991523131, 0.0554603661286, 0.0131211110109, 0.00834159978357, 0.116331499684, 0.00225448642799, 0.0413472810894, 0.0102353683831, 0.0258364144648, 0.00184867887095, 0.000721435656957, 0.00166831995671, 0.157047524574, 0.000360717828479, 0.000450897285598, 0.0170439173956, 0.00257011452791, 0.000946884299757, 4.50897285598e-05, 0.112679231671, 0.0276400036072, 0.00013526918568, 0.00784561276941, 0.000495987014158, 0.00455406258454, 0.00144287131391, 0.0132563801966, 0.00473442149878, 0.0229957615655, 0.000180358914239, 0.00157814049959, 0.0052754982415, 0.060645684913, 9.01794571197e-05, 0.0260618631076, 0.00432861394174, 0.00676345928398, 0.000991974028316, 0.00559112634142, 0.000180358914239, 0.00198394805663, 0.0252502479935, 0.00157814049959),
(0.00019399582909, 0.00921480188176, 0.00315243222271, 0.0, 0.00145496871817, 0.00465589989815, 0.0549978175469, 0.00717784567632, 0.0, 0.0125612299336, 0.052524370726, 0.000339492700907, 0.0521848780251, 0.0674620495659, 0.00174596246181, 0.00174596246181, 0.0269654202435, 0.00446190406906, 0.230855036617, 0.0140161986517, 0.000290993743635, 0.139773994859, 0.0064988602745, 0.0927785052621, 0.000387991658179, 0.0226005140889, 0.00872981230904, 4.84989572724e-05, 0.00354042388089, 0.0195450797808, 0.000242494786362, 0.0084388185654, 0.0084388185654, 0.00126097288908, 0.00514088947088, 0.00150346767544, 0.0401571366216, 0.0014064697609, 9.69979145448e-05, 0.00310393326543, 0.00484989572724, 0.0064988602745, 0.00305543430816, 0.000339492700907, 0.00678985401814, 0.000872981230904, 4.84989572724e-05, 0.0056258790436, 0.000678985401814, 0.021727532858, 0.00906930500994, 0.0019399582909, 0.000339492700907, 0.00290993743635, 0.00872981230904, 0.00465589989815, 4.84989572724e-05, 0.000775983316359, 0.00523788738542, 0.00611086861632),
(0.129043834914, 0.00143552935145, 0.00046142014868, 0.015175596001, 0.0207639066906, 0.00343501666239, 0.00143552935145, 0.0312740322994, 0.000102537810818, 0.00143552935145, 0.00558831068957, 0.00215329402717, 5.12689054089e-05, 0.0169700076903, 0.000205075621635, 0.0492181491925, 5.12689054089e-05, 0.00974109202769, 0.00492181491925, 0.00338374775699, 0.000205075621635, 0.00246090745963, 0.00384516790567, 0.00210202512176, 0.00681876441938, 0.0, 0.14729556524, 0.000666495770315, 0.000205075621635, 0.00435785695975, 0.152524993591, 0.00671622660856, 0.0105101256088, 0.00251217636503, 0.0, 0.00389643681107, 0.000153806716227, 5.12689054089e-05, 0.0748526018969, 0.000769033581133, 0.0, 0.000358882337862, 0.0186618815688, 0.000666495770315, 0.0, 5.12689054089e-05, 0.00199948731095, 0.022199436042, 0.000974109202769, 0.050910023071, 0.0, 0.0765957446809, 0.000717764675724, 0.00451166367598, 0.000512689054089, 0.094796206101, 0.00738272237888, 0.0, 0.000410151243271, 0.00246090745963),
(0.0113142741691, 0.00676836044045, 0.000656631983029, 0.00282856854228, 0.00702091120315, 0.0402060814224, 0.00146479442368, 0.0367713910496, 0.000151530457622, 0.00555611677947, 0.00798060410142, 0.00570764723709, 0.00242448732195, 0.0321749671684, 0.000202040610163, 0.0295484392363, 0.00121224366098, 0.0109101929488, 0.157440145469, 0.00550560662693, 0.000353571067785, 0.173249823214, 0.00106071320335, 0.00368724113547, 0.00308111930498, 0.00353571067785, 0.0493484190322, 0.00222244671179, 0.00429336296596, 0.0164157995757, 0.0357611879988, 0.00661682998283, 0.0197999797959, 0.0126275381352, 0.00191938579655, 0.039700979897, 0.0191433478129, 0.0, 0.0141933528639, 0.0216688554399, 0.00171734518638, 0.0227800787958, 0.0784422668956, 0.00126275381352, 0.000151530457622, 0.0, 0.00191938579655, 0.00666734013537, 0.00303060915244, 0.0512678048288, 0.00181836549146, 0.00479846449136, 0.000202040610163, 0.000959692898273, 0.00207091625417, 0.00793009394888, 0.00151530457622, 0.0143448833215, 0.00237397716941, 0.00222244671179),
(0.0630477651183, 0.0, 0.00695661700263, 0.0747151621385, 0.00958588957055, 0.0134750219106, 0.0012050832603, 0.00284837861525, 0.00032865907099, 0.0, 0.00109553023663, 0.106430762489, 0.0, 0.00832602979842, 0.0, 0.00405346187555, 0.00175284837862, 0.00317703768624, 0.000985977212971, 0.000657318141981, 0.00109553023663, 0.0244851007888, 0.00213628396144, 0.000438212094654, 0.175284837862, 0.00109553023663, 0.0708808063103, 0.012379491674, 0.000273882559159, 0.0829864154251, 0.0197743207713, 0.000821647677476, 0.00257449605609, 0.0141871165644, 0.000109553023663, 5.47765118317e-05, 0.00361524978089, 0.000602541630149, 0.0362072743208, 0.0139132340053, 0.0, 0.000164329535495, 0.0024101665206, 0.00394390885188, 0.0, 0.00876424189308, 5.47765118317e-05, 0.000876424189308, 0.0209794040316, 0.0689088518843, 0.0, 0.0487510955302, 0.00153374233129, 0.00301270815074, 0.0, 0.00262927256792, 0.0565293602103, 0.00323181419807, 0.00766871165644, 0.0089833479404),
(0.00713831711729, 0.0902557082091, 9.77851659903e-05, 0.00180902557082, 0.00425365472058, 9.77851659903e-05, 0.00410697697159, 0.140615068694, 0.0, 0.00136899232386, 0.204126534005, 0.000146677748985, 0.00664939128734, 0.0712364934239, 0.0116364347528, 0.0432210433677, 0.000293355497971, 0.0090940204371, 0.0227350510927, 0.156945191414, 9.77851659903e-05, 0.000733388744927, 0.00132009974087, 0.0696719307681, 0.000293355497971, 0.000880066493913, 0.050848286315, 0.0, 0.000195570331981, 0.000146677748985, 0.0332469564367, 0.0010267442429, 0.00136899232386, 0.000146677748985, 0.0, 0.000537818412947, 0.00156456265585, 0.000537818412947, 0.00474258055053, 0.000537818412947, 0.000146677748985, 0.00342248080966, 0.00836063169217, 9.77851659903e-05, 4.88925829952e-05, 0.0, 9.77851659903e-05, 0.00386251405662, 0.000195570331981, 0.00581821737642, 0.0, 0.00293355497971, 4.88925829952e-05, 0.00821395394319, 0.00180902557082, 0.010022979514, 4.88925829952e-05, 0.0, 0.00185791815382, 0.00928959076908),
(0.00238650827323, 0.00206830717013, 0.00121977089521, 0.00625795502758, 0.00625795502758, 0.00270470937633, 0.00312897751379, 0.0511243105643, 0.00127280441239, 0.00111370386084, 0.0378659312686, 0.0141599490878, 0.000636402206194, 0.0112431056428, 0.0152736529487, 0.0409418752652, 0.00121977089521, 0.0314488756894, 0.100286380993, 0.0248727195588, 0.00376537971998, 0.0622083156555, 0.000477301654646, 0.0114022061943, 0.00243954179041, 0.00747772592278, 0.0120386084005, 0.0149554518456, 0.00800806109461, 0.0299109036911, 0.0134174798473, 0.00684132371659, 0.0174480271532, 0.009280865507, 0.00106067034366, 0.00954603309291, 0.182912600764, 0.00668222316504, 0.00334111158252, 0.0348430207891, 0.00243954179041, 0.0148493848112, 0.0171828595672, 0.0441769198133, 0.000424268137463, 0.0, 0.0456088247773, 0.00477301654646, 0.0183495969453, 0.019145099703, 0.000424268137463, 0.00164403903267, 0.00201527365295, 0.0015379719983, 0.00471998302927, 0.00074246924056, 0.00137887144675, 0.0140008485363, 0.00975816716165, 0.00328807806534),
(0.090278594099, 0.0279769601505, 0.0, 0.00728811566945, 0.0521923122135, 0.000587751263665, 5.87751263665e-05, 0.103973198542, 0.0, 0.00129305278006, 0.00440813447749, 0.00111672740096, 5.87751263665e-05, 0.0287410367932, 0.000999177148231, 0.0707652521453, 0.0, 0.00199835429646, 0.0168096861408, 0.00969789585048, 0.000117550252733, 0.000705301516398, 0.0010579522746, 0.000470201010932, 0.00358528270836, 5.87751263665e-05, 0.185082872928, 0.0, 0.000470201010932, 0.000940402021864, 0.253438344892, 0.00364405783472, 0.00293875631833, 0.00487833548842, 5.87751263665e-05, 0.00705301516398, 0.000411425884566, 5.87751263665e-05, 0.0136946044434, 0.0010579522746, 5.87751263665e-05, 0.00240978018103, 0.0296226636887, 5.87751263665e-05, 0.0, 0.0, 0.000705301516398, 0.0313859174797, 0.000293875631833, 0.00387915834019, 0.0, 0.0106970729987, 0.0, 0.00376160808746, 0.000528976137299, 0.00670036440578, 0.000764076642765, 0.000235100505466, 0.000470201010932, 0.0104619724932),
(0.000921942224954, 0.00964197910264, 0.00192071296865, 0.00192071296865, 0.00126767055931, 0.0130608481868, 0.00460971112477, 0.00668408113092, 0.00119084204057, 0.00230485556238, 0.00822065150584, 0.0220113706208, 0.156153964352, 0.000691456668715, 0.000115242778119, 0.00207437000615, 0.0444452980947, 0.00418715427167, 0.0164797172711, 0.00587738168408, 0.000384142593731, 0.00560848186847, 3.84142593731e-05, 0.00653042409342, 0.0024200983405, 0.130301167793, 0.000345728334358, 0.00971880762139, 0.000960356484327, 0.000960356484327, 0.00130608481868, 3.84142593731e-05, 0.00745236631838, 0.0119084204057, 0.000307314074985, 0.0195912722803, 0.000806699446835, 0.00207437000615, 0.000153657037492, 0.0957283343577, 0.00126767055931, 0.186117086663, 0.117201905347, 0.00595421020283, 0.000384142593731, 0.000537799631223, 0.00706822372465, 3.84142593731e-05, 0.0106791641057, 0.0014213275968, 0.0019975414874, 0.000537799631223, 0.000192071296865, 0.00049938537185, 0.000576213890596, 0.000345728334358, 0.00149815611555, 0.0388368162262, 0.0241625691457, 0.000268899815612),
(0.0516343951697, 0.000208203206329, 0.000694010687765, 0.00680130474009, 0.000138802137553, 0.00097161496287, 0.00277604275106, 0.00395586092026, 0.00201263099452, 0.0, 0.00111041710042, 0.0331043098064, 0.00305364702616, 0.00367825664515, 0.0011798181692, 0.00569088763967, 0.0047192726768, 0.018321882157, 0.01304740093, 0.00131862030675, 0.000485807481435, 0.0172114650566, 6.94010687765e-05, 0.00166562565064, 0.0599625234229, 0.00395586092026, 0.00402526198903, 0.0870983413145, 0.000624609618988, 0.132347838157, 0.0094385453536, 0.00201263099452, 0.00395586092026, 0.000832812825318, 0.00159622458186, 0.00166562565064, 0.126309945173, 0.00305364702616, 0.0208203206329, 0.00360885557638, 0.000347005343882, 0.00360885557638, 0.00367825664515, 0.00194322992574, 0.000138802137553, 0.000555208550212, 0.0345617322507, 6.94010687765e-05, 0.151363731001, 0.0199181067388, 0.000277604275106, 0.106947046985, 0.00256783954473, 6.94010687765e-05, 0.000555208550212, 0.014296620168, 0.00520508015823, 0.00555208550212, 0.00097161496287, 0.0122145881047),
(0.00551739668956, 0.000150133243253, 0.084262282776, 0.00773186202755, 0.0125361258117, 0.0317531809481, 0.00600532973014, 0.000638066283827, 0.00011259993244, 0.000863266148707, 0.00446646398679, 0.00101339939196, 0.0132492587171, 0.00067559959464, 0.00123859925684, 0.000525466351387, 0.0673347595991, 0.000375333108133, 0.00487933040574, 0.00165146567579, 0.0352062455429, 0.019104455204, 0.0171151897309, 0.00544233006794, 0.00897046128439, 0.0167773899336, 0.000638066283827, 0.0104342604061, 0.0156138572984, 0.0760424877078, 0.00157639905416, 0.00247719851368, 0.0258229178396, 0.00754419547348, 0.0168149232444, 0.000938332770334, 0.0189543219607, 0.0898172127763, 0.0050669969598, 0.00593026310851, 0.00634312952746, 0.00221446533799, 0.00101339939196, 0.021506587096, 0.0638816950043, 0.101752805615, 0.000262733175693, 0.00626806290583, 0.00243966520287, 0.0238711856773, 0.00446646398679, 0.0258979844612, 0.0370453777728, 0.0195173216229, 0.00439139736516, 0.00664339601396, 0.00574259655444, 0.00867019479788, 0.0327665803401, 3.75333108133e-05),
(0.000164785367059, 0.0103814781247, 0.0015242646453, 8.23926835297e-05, 0.00193622806295, 0.0143775232759, 0.0173848562248, 0.0038724561259, 0.0, 0.139861580292, 0.00926917689709, 0.000370767075884, 0.0903847738321, 0.00461399027766, 0.000782730493532, 0.00177144269589, 0.0240998599324, 0.0030485292906, 0.113537117904, 0.00197742440471, 0.000988712202356, 0.0234407184642, 0.0161489659718, 0.0254593392107, 0.000247178050589, 0.0198154403889, 0.00243058416413, 0.00230699513883, 0.0149954684024, 0.00313092197413, 0.000164785367059, 0.0215868830848, 0.014542308643, 0.00308972563236, 0.0253357501854, 0.0212573123507, 0.0108758342259, 0.00284254758177, 0.00123589025295, 0.00642662931532, 0.0683447309879, 0.0151602537695, 0.0179204086677, 0.00127708659471, 0.0367471368542, 0.000535552442943, 0.000865123177062, 0.00861003542885, 0.00107110488589, 0.00115349756942, 0.12581362775, 0.000453159759413, 0.000535552442943, 0.0052731317459, 0.0449864052072, 0.00267776221472, 0.000247178050589, 0.0101754964159, 0.0022246024553, 0.000164785367059),
(0.0450960742451, 0.00326783146704, 0.00745065574485, 0.00975992331489, 0.0653130582545, 0.00797350877957, 0.00270140734609, 0.00971635222866, 0.000174284344909, 0.00204784105268, 0.000479281948499, 0.0100649209185, 0.000348568689817, 0.00182998562154, 0.00553352795085, 0.015729162128, 0.000697137379635, 0.00331140255327, 0.00152498801795, 0.00239640974249, 0.0136377499891, 0.00265783625986, 0.0264476493399, 0.00100213498323, 0.00984706548734, 0.000392139776045, 0.0276676397543, 0.00287569169099, 0.00941135462507, 0.0223083961483, 0.0617402291839, 0.0116334800227, 0.0468389176942, 0.0099342076598, 0.000174284344909, 0.0120691908849, 0.00888850159034, 0.00975992331489, 0.0306740447039, 0.00880135941789, 0.00135070367304, 0.00178641453531, 0.0266219336848, 0.0203912683543, 0.000348568689817, 0.000435710862272, 0.0084963618143, 0.0605202387696, 0.0193891333711, 0.0657051980306, 8.71421724544e-05, 0.0416539584332, 0.00165570127663, 0.039126835432, 0.0193891333711, 0.16064659492, 0.0130277547819, 0.000871421724544, 0.00614352315803, 0.000174284344909),
(0.00483335434792, 0.0111377295843, 0.000924641701341, 0.00474929601143, 0.02008994242, 0.0411885848779, 0.00214348758038, 0.0109696129114, 0.000336233345942, 0.0717437901904, 0.00504350018913, 0.0130290421553, 0.0127348379776, 0.00273189593578, 0.00584205438574, 0.00853192115328, 0.00159710839322, 0.00374059597361, 0.0253435884504, 0.0078594544614, 0.0060942293952, 0.0177783381667, 0.00546379187156, 0.00365653763712, 0.00374059597361, 0.00689278359181, 0.00487538351616, 0.00374059597361, 0.0433741016265, 0.00487538351616, 0.00722901693775, 0.0139957130248, 0.014710208885, 0.0382885722692, 0.00798554196612, 0.0996931870718, 0.00453915017022, 0.00760727945194, 0.00315218761821, 0.0562350271088, 0.0293363594334, 0.0676669608708, 0.0882192241416, 0.0199638549153, 0.0102971462195, 0.000168116672971, 0.00613625856344, 0.0212667591308, 0.00159710839322, 0.00626234606817, 0.0482915143109, 0.00277392510402, 0.00046232085067, 0.00853192115328, 0.049342243517, 0.00348842096415, 0.00075652502837, 0.0195855924011, 0.00718698776951, 0.000168116672971),
(0.00354246294397, 0.0, 0.038547590193, 0.0156148037662, 0.0102544979957, 0.038640812902, 0.00228395637177, 0.00186445418104, 0.000512724899786, 0.00046611354526, 0.00111867250862, 0.105854386128, 0.00214412230819, 0.00167800876293, 9.32227090519e-05, 0.00111867250862, 0.0428358348094, 0.00046611354526, 0.00261023585345, 0.00111867250862, 0.0150554675119, 0.032348280041, 0.00223734501725, 0.00186445418104, 0.0390137037382, 0.0165936422112, 0.000605947608838, 0.0400857648923, 0.0316024983686, 0.0312296075324, 0.00223734501725, 0.000699170317889, 0.00279668127156, 0.0295049874149, 0.012585065722, 0.00652558963363, 0.000792393026941, 0.0382213107113, 0.000885615735993, 0.0708492588795, 0.00181784282651, 0.00839004381467, 0.0276871445884, 0.0667474596812, 0.0193903234828, 0.0872564556726, 0.000978838445045, 0.0035890742985, 0.0095087163233, 0.0130045679127, 0.00195767689009, 0.00559336254312, 0.0126782884311, 0.00452130138902, 0.00135172928125, 0.000512724899786, 0.0208352754731, 0.0220471706908, 0.0454460706628, 0.000186445418104),
(0.000748378513221, 0.143771827707, 0.00457342424746, 0.000789955097289, 0.0164227507068, 0.000291036088475, 0.00162148677865, 0.0530101446865, 0.0, 0.000124729752204, 0.1717112922, 0.000207882920339, 0.0130134708132, 0.00390819890238, 0.10036587394, 0.019000498919, 0.00328455014136, 0.00623648761018, 0.00798270414103, 0.146391152503, 0.0135123898221, 0.00424081157492, 0.00503076667221, 0.0437385664394, 0.000124729752204, 0.0265674372194, 0.00266090138034, 0.000291036088475, 0.0010394146017, 0.00153833361051, 0.00469815399967, 0.000831531681357, 0.00141360385831, 0.00249459504407, 0.0, 0.000831531681357, 0.00498919008814, 0.00723432562781, 0.0022035589556, 0.0294777981041, 4.15765840679e-05, 0.0188757691668, 0.00835689339764, 0.0242807250956, 0.0, 4.15765840679e-05, 0.00390819890238, 0.00968734408781, 0.00120572093797, 0.00328455014136, 0.0, 0.000291036088475, 0.000831531681357, 0.0256527523699, 0.00702644270747, 0.00145518044237, 0.000124729752204, 0.000789955097289, 0.0462747380675, 0.00149675702644),
(0.00487122818096, 0.000721663434216, 0.00365342113572, 0.0161923233052, 0.0205674078752, 0.00518695593343, 0.00248071805512, 0.00284154977222, 0.00257092598439, 0.0549366289297, 0.000721663434216, 0.0183573136079, 0.00144332686843, 0.000225519823192, 0.00505164403951, 0.00577330747373, 0.000766767398854, 0.00446529249921, 0.00144332686843, 0.00108249515132, 0.0146136845429, 0.00162374272699, 0.0198006404763, 0.00112759911596, 0.00369852510036, 0.00112759911596, 0.00211988633801, 0.0445627170628, 0.0279193541112, 0.00428487664066, 0.00396914888819, 0.0194849127238, 0.0423075188309, 0.00631455504939, 0.0114113030535, 0.0744666456181, 0.0173650263858, 0.0559289161517, 0.00604393126156, 0.00541247575662, 0.0856524288485, 0.00509674800415, 0.014343060755, 0.0312119435298, 0.0237697893645, 0.00193947047946, 0.103017455234, 0.0143881647197, 0.0189887691128, 0.00884037706914, 0.0208831356276, 0.00207478237337, 0.0211086554508, 0.00884037706914, 0.105317757431, 0.00225519823192, 0.00297686166614, 0.00838933742276, 0.00387894095891, 9.0207929277e-05),
(0.00487368211657, 0.0648000795703, 0.00522180226775, 0.00785756912672, 0.101551621245, 0.00159140640541, 0.00193952655659, 0.00696240302367, 0.0, 0.0090013924806, 0.0142729261985, 0.00377959021285, 0.000795703202705, 0.00134274915457, 0.0440123333996, 0.024716530734, 0.000795703202705, 0.000994629003382, 0.00427690471454, 0.0110403819375, 0.0239705589815, 0.000895166103044, 0.0390889198329, 0.0196439228168, 0.000497314501691, 0.00149194350507, 0.00193952655659, 0.000497314501691, 0.0385916053312, 0.000596777402029, 0.0323751740601, 0.0304356475035, 0.00770837477621, 0.00731052317486, 0.00054704595186, 0.0181022478615, 0.00104436045355, 0.00656455142232, 0.0114382335389, 0.0165108414561, 0.00318281281082, 0.00651481997215, 0.0260095484384, 0.0152675552019, 0.00054704595186, 0.0, 0.0142231947484, 0.165406803262, 0.000298388701015, 0.0033817386115, 0.00169086930575, 0.000248657250845, 0.00258603540879, 0.115426695842, 0.0497811816193, 0.00372985876268, 0.0012930177044, 0.00134274915457, 0.0192958026656, 0.000696240302367),
(0.000353218210361, 0.0015306122449, 0.0722919937206, 0.00364992150706, 0.00686813186813, 3.92464678179e-05, 0.00706436420722, 0.000510204081633, 0.0, 0.000470957613815, 0.00902668759812, 0.000784929356358, 0.166326530612, 0.000392464678179, 0.000313971742543, 0.000353218210361, 0.188029827316, 0.00168759811617, 0.0176609105181, 0.00376766091052, 0.00910518053375, 0.0120879120879, 0.00447409733124, 0.00851648351648, 7.84929356358e-05, 0.18543956044, 3.92464678179e-05, 0.00698587127159, 0.00074568288854, 0.000981161695447, 0.000196232339089, 0.000510204081633, 7.84929356358e-05, 0.00282574568289, 0.000784929356358, 0.0015306122449, 0.000470957613815, 0.00910518053375, 0.0021978021978, 0.0324568288854, 0.000470957613815, 0.032103610675, 0.000981161695447, 0.00981161695447, 0.000392464678179, 0.00368916797488, 0.000667189952904, 0.0010989010989, 0.0, 0.00400313971743, 0.00235478806907, 0.000784929356358, 0.00459183673469, 0.0157770800628, 7.84929356358e-05, 0.000353218210361, 0.0043956043956, 0.0319858712716, 0.126687598116, 3.92464678179e-05),
(0.103093541039, 7.34807847748e-05, 0.000440884708649, 0.0455580865604, 0.000220442354324, 0.00014696156955, 0.00484973179514, 0.0524652803292, 0.0144022338159, 0.0, 0.00565802042766, 0.0260121978103, 0.00161657726505, 0.00279226982144, 0.000440884708649, 0.130207950621, 0.00242486589757, 0.0458520096995, 0.00484973179514, 0.0260121978103, 0.000587846278198, 0.00404144316261, 0.0, 0.00301271217577, 0.014696156955, 0.00110221177162, 0.0075685208318, 0.166213535161, 0.000367403923874, 0.00609890513631, 0.145859357778, 0.000220442354324, 0.000661327062973, 0.000661327062973, 0.000514365493423, 0.00338011609964, 0.00705415533838, 0.00213094275847, 0.0678962451319, 0.000440884708649, 0.000514365493423, 0.00139613491072, 0.000881769417297, 0.0014696156955, 0.00014696156955, 0.00132265412595, 0.0232934087736, 0.000220442354324, 0.0113160408553, 0.00198398118892, 7.34807847748e-05, 0.0148431185245, 0.00271878903667, 7.34807847748e-05, 0.000220442354324, 0.0100668675141, 0.0177823499155, 0.00396796237784, 0.000440884708649, 0.00764200161658),
(0.0117003785417, 7.64730623638e-05, 0.0868351623141, 0.0302833326961, 0.0142622261308, 0.00646197376974, 0.00103238634191, 0.00164417084082, 0.00248537452682, 0.00363247046228, 0.000420601843001, 0.00451191067946, 0.00477956639774, 0.001261805529, 0.000305892249455, 0.00504722211601, 0.0203800711199, 0.00290597636982, 0.00952089626429, 0.00198829962146, 0.0263832065155, 0.0337628570336, 0.0426337322678, 0.000535311436546, 0.00332657821282, 0.0113944862922, 0.00198829962146, 0.0235154666769, 0.00665315642565, 0.00734141398692, 0.0052766413031, 0.00489427599128, 0.00191182655909, 0.0103238634191, 0.0370129621841, 0.00783848889229, 0.00195006309028, 0.0305509884143, 0.0337246205024, 0.0135739685696, 0.0113180132298, 0.00833556379765, 0.00130004206018, 0.00699728520629, 0.0335716743777, 0.112071272894, 0.000955913279547, 0.00757083317401, 0.000344128780637, 0.0518869728138, 0.0142239895997, 0.0201506519329, 0.0682904446909, 0.0224448438038, 0.00325010515046, 0.0173976216878, 0.0331128360035, 0.0478339005085, 0.024815508737, 0.0),
(0.00131011748796, 0.00164821232356, 0.00147916490576, 0.00367678133717, 0.00342321021046, 0.0128476037529, 0.000929760797904, 0.00405713802722, 8.45237089003e-05, 0.103161186713, 0.000718451525653, 0.00215535457696, 0.00984701208689, 0.000464880398952, 0.000464880398952, 0.00321190093821, 0.00401487617277, 0.00435297100837, 0.00650832558533, 0.0010142845068, 0.00342321021046, 0.00164821232356, 0.00621249260418, 0.00143690305131, 0.000887498943454, 0.00431070915392, 0.00139464119686, 0.0325416279266, 0.0446285182994, 0.000549404107852, 0.00114107007015, 0.0171160510523, 0.00743808638323, 0.0128053418984, 0.0931873890626, 0.106795706196, 0.000338094835601, 0.00756487194658, 0.00376130504607, 0.0163553376722, 0.0893838221621, 0.0327951990533, 0.0134815315696, 0.00376130504607, 0.0263713971769, 0.00135237934241, 0.00460654213507, 0.00578987405967, 0.000972022652354, 0.000422618544502, 0.163173020032, 0.000211309272251, 0.0210886653706, 0.00291606795706, 0.0262446116136, 0.00177499788691, 0.00705772969318, 0.0669005155946, 0.00270475868481, 8.45237089003e-05),
(0.0985014538135, 0.00107358532767, 0.00362335048088, 0.016103779915, 0.0169089689108, 0.00577052113621, 0.00201297248938, 0.0212480429434, 0.00487586669649, 0.00058152538582, 0.00138671438157, 0.0170431670767, 4.47327219861e-05, 0.00335495414896, 0.00250503243122, 0.00979646611496, 0.000268396331917, 0.00237083426527, 0.00237083426527, 0.00429434131067, 0.00653097740998, 0.00165511071349, 0.0131961529859, 0.000670990829792, 0.0363677029747, 0.000223663609931, 0.0380228136882, 0.0173562961306, 0.000984119883695, 0.038828002684, 0.0697830462984, 0.00509953030642, 0.0566763587564, 0.00362335048088, 4.47327219861e-05, 0.00058152538582, 0.0154775218072, 0.00688883918586, 0.0264817714158, 0.000492059941847, 0.000536792663834, 4.47327219861e-05, 0.00357861775889, 0.00460747036457, 0.000357861775889, 0.000939387161709, 0.0213375083874, 0.0166405725788, 0.0989935137553, 0.0471482889734, 0.0, 0.0896443748602, 0.00366808320286, 0.0125251621561, 0.00331022142697, 0.136613732946, 0.00541265936032, 8.94654439723e-05, 0.00143144710356, 0.0),
(0.00230660800478, 0.0026483277092, 0.0220836358977, 0.00713339882961, 0.0242193840502, 0.00751783349707, 0.015761821366, 0.014907522105, 8.54299261031e-05, 0.0603562427918, 0.0835931826919, 0.0012387339285, 0.0161035410704, 0.00397249156379, 0.00491222075093, 0.000854299261031, 0.013284353509, 0.00243475289394, 0.00602280979027, 0.0557857417453, 0.0410063645295, 0.000982444150186, 0.0106787407629, 0.0663363376191, 0.000854299261031, 0.0103797360215, 0.000897014224083, 0.0054675152706, 0.0524539746273, 0.00247746785699, 0.00170859852206, 0.00751783349707, 0.00850027764726, 0.00982444150186, 0.0286190252445, 0.0105505958737, 0.00205031822647, 0.0559138866345, 0.000897014224083, 0.0120456195805, 0.0418179488275, 0.014095937807, 0.00350262697023, 0.017427704925, 0.0352825594806, 0.00337448208107, 0.0014950237068, 0.0100380163171, 0.00055529451967, 0.00324633719192, 0.0694545299218, 0.00277647259835, 0.0239630942719, 0.0242193840502, 0.0301140489513, 0.00256289778309, 0.00106787407629, 0.0123019093588, 0.0283200205032, 0.0),
(0.00782487638616, 0.0, 0.0290432528443, 0.00475253228362, 0.00772886563295, 0.134751092122, 0.00585665594547, 0.00355239786856, 9.60107532044e-05, 0.000624069895828, 0.00129614516826, 0.0121453602804, 0.00192021506409, 0.000864096778839, 0.0, 0.000288032259613, 0.0131054678124, 0.000288032259613, 0.0043204838942, 0.00110412366185, 0.0288512313379, 0.013153473189, 0.0110892419951, 0.00340838173875, 0.0670155057366, 0.00379242475157, 0.00187220968748, 0.00787288176276, 0.0431568335654, 0.0309154625318, 0.000528059142624, 0.00292832797273, 0.00508856991983, 0.103403581201, 0.0525658873794, 0.000528059142624, 0.00100811290865, 0.0340838173875, 0.000480053766022, 0.031491527051, 0.00264029571312, 0.00129614516826, 0.00499255916663, 0.0163698334213, 0.0584225433249, 0.128318371658, 4.80053766022e-05, 0.00475253228362, 0.00398444625798, 0.00715280111372, 0.00835293552878, 0.0163218280447, 0.0431088281888, 0.00600067207527, 0.00129614516826, 0.00412846238779, 0.00192021506409, 0.00566463443906, 0.01243339254, 0.0),
(0.00699718406007, 0.0770116904173, 0.00763717040703, 0.0321273146173, 0.0374178684188, 0.000255994538783, 0.000255994538783, 0.0169383053162, 0.00145063571977, 0.000170663025855, 0.0590067411895, 0.00405324686407, 0.00349859203004, 0.00277327417015, 0.12983189692, 0.0270500895981, 0.0015359672327, 0.00110930966806, 0.00486389623688, 0.0447137127741, 0.0968086014165, 0.00298660295247, 0.0203089000768, 0.0116050857582, 0.000810649372813, 0.0114770884888, 0.00166396450209, 0.00213328782319, 0.000511989077566, 0.0030719344654, 0.0187729328441, 0.0112637597065, 0.00110930966806, 0.00413857837699, 4.26657564639e-05, 0.000426657564639, 0.00610120317433, 0.0112637597065, 0.0389538356515, 0.0158289956481, 4.26657564639e-05, 0.00328526324772, 0.00255994538783, 0.0539295161703, 0.0, 0.000127997269392, 0.0391671644338, 0.0570867821486, 0.00302926870893, 0.00477856472395, 0.0, 0.00371192081236, 0.00358392354296, 0.044329720966, 0.00341326051711, 0.00537588531445, 0.000725317859886, 0.000127997269392, 0.0567454560969, 0.0),
(0.00362184249629, 0.000139301634473, 0.00385401188707, 0.00552563150074, 0.00566493313522, 0.00408618127786, 0.0117013372957, 0.037147102526, 0.00362184249629, 0.0627786032689, 0.00538632986627, 0.0092867756315, 0.00162518573551, 0.000882243684993, 0.00116084695394, 0.0166233283804, 0.00130014858841, 0.0315750371471, 0.000603640416048, 0.0139765973254, 0.0092867756315, 0.000139301634473, 0.0320858098068, 0.00348254086181, 0.00283246656761, 0.00134658246657, 0.00617570579495, 0.0545598068351, 0.0169483655275, 0.000789375928678, 0.00348254086181, 0.106472882615, 0.0077544576523, 0.00770802377415, 0.0443443536404, 0.0366363298663, 0.00102154531947, 0.0431835066865, 0.00821879643388, 0.00650074294205, 0.0927284546805, 0.00571136701337, 0.0104011887073, 0.00951894502229, 0.0377507429421, 0.00366827637444, 0.0254921991085, 0.0164375928678, 0.00900817236256, 0.00092867756315, 0.0407689450223, 0.00157875185736, 0.0517273402675, 0.00436478454681, 0.0390508915305, 0.00482912332838, 0.0134658246657, 0.017087667162, 0.00195022288262, 0.0),
(0.0123520065604, 0.0133258162062, 0.0107631592435, 0.0298805801855, 0.00635538926759, 0.000820050228076, 0.00205012557019, 0.00579160473579, 0.000871303367331, 0.0128132848137, 0.00517656706473, 0.00322894777305, 0.0021526318487, 0.000922556506586, 0.0382860950233, 0.0220901030188, 0.00138383475988, 0.00189636615243, 0.000820050228076, 0.00297268207678, 0.082671313618, 0.000563784531803, 0.101993747117, 0.0158372200297, 0.00199887243094, 0.00117882220286, 0.00307518835529, 0.00635538926759, 0.0375172979345, 0.000615037671057, 0.00943057762288, 0.0680129157911, 0.0048690482292, 0.00384398544411, 0.00210137870945, 0.00943057762288, 0.00256265696274, 0.0445389780124, 0.0487417354313, 0.0021526318487, 0.0117882220286, 0.00240889754497, 0.00143508789913, 0.0086105273948, 0.00143508789913, 0.000153759417764, 0.0595561478141, 0.0744708113372, 0.0086105273948, 0.00625288298908, 0.00235764440572, 0.00353646660858, 0.0528419865717, 0.0625800830301, 0.0627338424478, 0.00609912357132, 0.000358771974783, 0.00220388498796, 0.0109681718005, 0.000153759417764),
(0.0546458074421, 0.000219314277359, 0.0544995979238, 0.113531690913, 0.0139630089919, 0.00186417135756, 0.00540975217487, 0.00855325681702, 0.00614079976606, 0.0, 0.00325316178083, 0.01494992324, 0.00281453322611, 0.00350902843775, 0.000255866656919, 0.058885883471, 0.0156809708312, 0.0247459609621, 0.00131588566416, 0.0233935229183, 0.0343592367863, 0.00237590467139, 0.033884055852, 0.00500767599971, 0.0175451421888, 0.0214927991812, 0.000767599970758, 0.0184223992982, 0.000365523795599, 0.00215659039403, 0.0535492360553, 0.0102712186563, 0.000219314277359, 0.0159368374881, 0.000548285693399, 0.000511733313839, 0.0, 0.00314350464215, 0.126544338036, 0.0264639228014, 3.65523795599e-05, 0.00551940931355, 0.00296074274435, 0.0139995613714, 0.000621390452518, 0.00394765699247, 0.00149864756196, 0.0070180568755, 0.00113312376636, 0.00493457124059, 0.00010965713868, 0.0150230279991, 0.0270487608743, 0.0217852182177, 3.65523795599e-05, 0.0307405512099, 0.0181665326413, 0.00833394253966, 0.0540609693691, 0.001827618978),
(0.000470283933925, 0.0, 0.000176356475222, 0.00176356475222, 5.87854917406e-05, 5.87854917406e-05, 0.255422961613, 0.00258656163659, 0.0711304450062, 0.0, 0.0122861677738, 0.00164599376874, 0.00264534712833, 0.0294515313621, 0.000117570983481, 0.0276879666098, 0.00199870671918, 0.29380988772, 0.0392687084827, 0.0146375874434, 0.0, 0.0130503791664, 0.0, 0.0780083475398, 0.000352712950444, 0.00094056786785, 0.000411498442184, 0.0243959790724, 0.0, 0.00211627770266, 0.000999353359591, 0.000293927458703, 0.000117570983481, 5.87854917406e-05, 0.000293927458703, 0.000822996884369, 0.0198107107166, 0.000176356475222, 0.0166950796543, 0.0, 0.000117570983481, 0.000529069425666, 5.87854917406e-05, 5.87854917406e-05, 0.0, 5.87854917406e-05, 0.0102874610546, 0.000117570983481, 0.0088178237611, 0.00399741343836, 0.000176356475222, 0.000117570983481, 0.000235141966963, 0.0, 0.0, 0.00141085180178, 0.0424431250367, 0.00317441655399, 0.000117570983481, 0.0145200164599),
(0.00643237027454, 0.000503090412534, 0.00797757654161, 0.0181831249102, 0.00772603133535, 0.0017248814144, 0.00981026304442, 0.00506683915481, 0.00711513583441, 0.000826505677735, 0.00251545206267, 0.00420439844761, 0.00294667241627, 0.0579991375593, 3.59350294667e-05, 0.0175362943798, 0.00697139571654, 0.0459609026879, 0.0704326577548, 0.0164941785252, 0.00438407359494, 0.175937904269, 0.00822912174788, 0.0104570935748, 0.00240764697427, 0.0157754779359, 0.00542618944948, 0.0139068564036, 0.00154520626707, 0.0137631162858, 0.00521057927268, 0.00963058789708, 0.000251545206267, 0.00815725168895, 0.0171769440851, 0.00657611039241, 0.0125053902544, 0.00276699726894, 0.0208423170907, 0.0112117291936, 0.00229984188587, 0.012469455225, 0.001617076326, 0.0019404915912, 0.00539025442001, 0.0123257151071, 0.000790570648268, 0.00305447750467, 0.000395285324134, 0.145680609458, 0.00654017536294, 0.0138709213742, 0.01886589047, 0.00528244933161, 0.000179675147334, 0.0389176369125, 0.0288917636912, 0.0435173206842, 0.0196564611183, 0.00168894638494),
(0.00755062349846, 0.000953361552835, 0.00915227090722, 0.0124699691111, 0.00606337947603, 0.0269991991763, 0.0167410288678, 0.00320329481753, 0.00415665637036, 0.00236433665103, 0.0161690119361, 0.00408038744614, 0.000991496014949, 0.00575830377913, 0.00152537848454, 0.00762689242268, 0.00118216832552, 0.0593753575106, 0.00507188346108, 0.0163978187088, 0.0243679212905, 0.016817297792, 0.0093810776799, 0.0379437898028, 0.031270258933, 0.00266941234794, 0.000724554780155, 0.0568966174732, 0.0299736872211, 0.0778324371735, 0.0014109750982, 0.00606337947603, 0.0302024939938, 0.0130038515807, 0.021851046791, 0.0186477519735, 0.0192197689052, 0.0161690119361, 0.0115547420204, 0.0085421195134, 0.00598711055181, 0.00335583266598, 0.0068260687183, 0.0346642260611, 0.00278381573428, 0.0011440338634, 0.00747435457423, 0.00419479083248, 0.013118254967, 0.0431300766503, 0.0062540517866, 0.0441978415894, 0.0244823246768, 0.0128894481943, 0.00789383365748, 0.0202493993822, 0.0700911413645, 0.0306982420013, 0.0158639362392, 0.00232620218892),
(0.0234406357871, 0.000280966524845, 0.00168579914907, 0.0180621337401, 0.00907120494501, 0.00168579914907, 0.0780284177571, 0.0276952717348, 0.0408204222525, 0.00020069037489, 0.0319097696074, 0.0179818575901, 0.000321104599823, 0.0520590832464, 0.000321104599823, 0.0594043509673, 0.000160552299912, 0.0652645099141, 0.0134462551176, 0.0433089829012, 0.000481656899735, 0.00128441839929, 0.018182547965, 0.0722886730352, 0.00766637232078, 0.000160552299912, 0.018262824115, 0.0343180541061, 0.00020069037489, 0.00385325519788, 0.0449947820503, 0.0113189371438, 0.00858954804528, 0.000722485349603, 8.02761499558e-05, 0.00204704182387, 0.00577988279682, 0.00100345187445, 0.039977522678, 0.00020069037489, 0.000160552299912, 0.000842899574536, 0.00144497069921, 0.000160552299912, 0.000441518824757, 0.000160552299912, 0.0459982339247, 0.0257285060608, 0.0250862968612, 0.0206309705387, 0.0, 0.00642209199647, 0.0010837280244, 0.00935217146986, 0.000842899574536, 0.0894276310508, 0.0134061170426, 0.000280966524845, 0.000561933049691, 0.00140483262423),
(0.00445103857567, 8.01988932553e-05, 0.0281899109792, 0.000360895019649, 0.000721790039297, 0.0138744085332, 0.185540139546, 0.00505253027508, 0.000120298339883, 0.00372924853637, 0.137982195846, 0.00068169059267, 0.00268666292405, 0.0518886839362, 0.00164407731173, 0.000320795573021, 0.00850108268506, 0.00228566845778, 0.00312775683696, 0.0491619215655, 0.00461143636218, 0.0060951158874, 0.0133130162804, 0.211524580961, 0.0139546074264, 0.00172427620499, 0.000641591146042, 0.00128318229208, 0.00429064078916, 0.0308364744567, 0.000521292806159, 0.00084208837918, 0.0120699334349, 0.00617531478066, 0.0135536129601, 0.000240596679766, 0.00814018766541, 0.0251022535889, 0.000160397786511, 0.00128318229208, 0.00693720426658, 0.000320795573021, 0.00272676237068, 0.00128318229208, 0.0129521212607, 0.0191274360414, 0.000160397786511, 0.00176437565162, 0.00553372363461, 0.0174031598364, 0.00553372363461, 0.0181650493223, 0.0218942978587, 0.00673670703344, 0.00384954687625, 0.0117090384153, 0.000761889485925, 0.000561392252787, 0.00445103857567, 0.00136338118534),
(0.121805213552, 0.0, 0.00878831620956, 0.00403328521695, 0.0153264838244, 0.0380827035748, 0.0445784155557, 0.0109535535366, 8.49112677252e-05, 0.0, 0.000934023944977, 0.00254733803176, 4.24556338626e-05, 0.0362146556848, 8.49112677252e-05, 0.00140103591747, 0.00390591831536, 0.000636834507939, 0.00106139084657, 0.00225014859472, 0.000551923240214, 0.00887322747729, 0.00619852254394, 0.00297189437038, 0.0725566782712, 0.000127366901588, 0.00539186550055, 0.00016982253545, 0.000297189437038, 0.0349409866689, 0.0257705697546, 0.00080665704339, 0.00229260422858, 0.0813449944808, 0.00433047465399, 0.0, 0.00123121338202, 0.00258979366562, 0.000636834507939, 0.00437293028785, 0.000212278169313, 0.0, 0.000254733803176, 0.000467011972489, 0.0045427528233, 0.0951430754861, 0.0, 0.00114630211429, 0.00110384648043, 0.0453850725991, 0.000127366901588, 0.149825931901, 0.0146471936826, 0.00118875774815, 0.000127366901588, 0.12970196145, 0.000382100704764, 4.24556338626e-05, 0.00157085845292, 0.00594378874077),
(0.015594467221, 0.00636039695007, 0.000191578221388, 0.0585079888118, 0.0131039503429, 0.000191578221388, 0.000574734664163, 0.0293880991609, 0.0156710985095, 0.0, 0.0633357599908, 0.00781639143262, 0.0, 0.0248668531361, 0.0193877160044, 0.203992490134, 0.0, 0.0375110157477, 0.00218399172382, 0.123184796352, 0.0244453810491, 0.00157094141538, 0.0040997739377, 0.0280470516112, 0.00172420399249, 0.000114946932833, 0.00421472087053, 0.0015326257711, 3.83156442776e-05, 0.000881259818384, 0.054523161807, 0.0130656346986, 0.000536419019886, 0.0025671481666, 3.83156442776e-05, 0.000919575462661, 0.00149431012682, 0.000766312885551, 0.076478025978, 0.00218399172382, 0.0, 0.000536419019886, 0.000919575462661, 0.0103452239549, 0.0, 0.0, 0.04720487375, 0.0235258055864, 0.00517261197747, 0.0145216291812, 3.83156442776e-05, 0.00348672362926, 0.00103452239549, 0.00842944174106, 0.000498103375608, 0.036169968198, 0.00141767883827, 3.83156442776e-05, 0.00352503927354, 0.00203072914671),
(0.00132516150406, 0.000662580752029, 0.00323008116614, 0.00331290376015, 0.0045138313732, 0.00434818618519, 0.0572304124565, 0.0530892827563, 0.00414112970018, 0.00256750041411, 0.0808348517476, 0.00372701673016, 0.00190491966208, 0.0349925459665, 0.00306443597813, 0.0274142786152, 0.00426536359119, 0.173927447408, 0.00641875103528, 0.104936226603, 0.00385125062117, 0.00960742090442, 0.00265032300812, 0.0979377174093, 0.00335431505715, 0.00753685605433, 0.00323008116614, 0.0178482690078, 0.00563193639225, 0.0171442769588, 0.00207056485009, 0.0370631108166, 0.00989729998344, 0.00409971840318, 0.0090690740434, 0.00873778366738, 0.0161918171277, 0.0091104853404, 0.00306443597813, 0.00977306609243, 0.00542487990724, 0.00625310584728, 0.00376842802717, 0.022196455193, 0.00215338744409, 0.00120092761305, 0.00662580752029, 0.00281596819612, 0.00323008116614, 0.0108083485175, 0.0044724200762, 0.00513500082823, 0.00542487990724, 0.00360278283916, 0.00621169455027, 0.00339572635415, 0.0182209706808, 0.0204157694219, 0.0136657280106, 0.00120092761305),
(0.0154196835613, 0.00455886296594, 0.000581031554483, 0.0425493876821, 0.00241351568785, 0.000312863144722, 0.00165370519353, 0.0459908822741, 0.0578796817735, 0.000938589434165, 0.0311522302673, 0.0137212836328, 0.000178778939841, 0.0294538303388, 0.00558684187003, 0.136676499508, 8.93894699204e-05, 0.0376329668365, 0.0162241887906, 0.111513363726, 0.0108608205953, 0.00156431572361, 0.0131402520783, 0.0278448198802, 0.00397783141146, 0.000178778939841, 0.00330741038706, 0.00768749441316, 0.00156431572361, 0.00259229462769, 0.0343702511844, 0.0568517028694, 0.00312863144722, 0.00107267363905, 0.000357557879682, 0.00446947349602, 0.00947528381157, 0.00384374720658, 0.0722266916957, 0.000938589434165, 0.00160901045857, 0.000938589434165, 0.00286046303745, 0.00254759989273, 4.46947349602e-05, 4.46947349602e-05, 0.110395995352, 0.00992223116117, 0.0172521676946, 0.00178778939841, 0.0, 0.00245821042281, 0.0113971574149, 0.00156431572361, 0.00527397872531, 0.0105032627157, 0.00151962098865, 0.000938589434165, 0.000849199964244, 0.00411191561634),
(*(0.1/(M-1), )*(M-1), 0.9)
)
pi = [0] * N
pi[N-1] = 1
| mit |
gfrubi/GR | figuras-editables/Lane_Emden_def.py | 4 | 1688 | # -*- coding: utf-8 -*-
from matplotlib.pyplot import *
from numpy import *
from scipy.integrate import odeint, quad
import matplotlib.pyplot as plt
style.use('classic')
def dtheta(theta, x, n):
if modf(n)[0] == 0.0:
return(theta[1], -2*theta[1]/x-(theta[0])**n)
else:
if theta[0] < 0.0 :
return(theta[1], -2*theta[1]/x+(abs(theta[0]))**n)
else:
return(theta[1], -2*theta[1]/x-(theta[0])**n)
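# Added note: dtheta encodes the Lane-Emden equation theta'' + (2/x)*theta' + theta**n = 0
# as the first-order system y = (theta, theta'); once theta turns negative, theta**n is
# replaced by sign(theta)*abs(theta)**n so that non-integer n does not produce complex values.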
theta0 = [1.0, 0.0]
x = linspace(1.0e-30, 35.0, 1000000)
enes = [0.,1.,1.5,3.,5.]
Thetas = []
#raices=zeros(len(enes))
for i in range(len(enes)):
sol = odeint(dtheta, theta0, x, args=(enes[i],))
    if len(where(sol[:,0] < 0)[0]) != 0:
        pos = (where(sol[:,0] < 0)[0][0])-1  # the solution has some NaNs, which is why this can fail
    elif len(where(isnan(sol[:,0]))[0]) != 0:
pos = where(isnan(sol[:,0])==True)[0][0]-1
else:
pos = len(x)
# thetapp=dtheta([sol[pos,0], sol[pos,1]],x[pos+1],enes[i])[1] #Segunda derivada en la ultima posicion
# x1 = x[pos] - sol[pos,1]/thetapp - sqrt(sol[pos,1]**2-2*sol[pos,0]*thetapp)/thetapp
# raices[i]=x1
Thetas.append(sol[:pos,0])
colores=['blue','red','brown','purple','black']
dasheses=[[],[5,2],[5,5],[5,2,2,2],[2,2]]
fig, axes = plt.subplots(figsize=(8,6))
for i in range(len(enes)):
axes.plot(x[:len(Thetas[i])], Thetas[i], colores[i], dashes=dasheses[i], label='$n = %1.1f$'%enes[i], linewidth=1.50)
axes.legend(loc='best')
#axes.set_title(u'Lane-Emden functions for different values of $n$')
axes.set_xlabel('$x$', fontsize=15)
axes.set_ylabel('$\Theta(x)$', fontsize=15)
axes.set_xlim(0,8)
axes.set_ylim(0,1)
axes.grid()
fig.savefig('../fig/fig-Lane-Emden.pdf')
#fig.show()
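# Illustrative addition (not part of the original script, uses only the arrays built
# above): the grid point where each stored solution ends approximates the first zero
# x1 of Theta(x); for n = 0 and n = 1 the analytic roots are sqrt(6) ~ 2.449 and
# pi ~ 3.1416, while n = 5 has no finite root.
for n_val, theta_n in zip(enes, Thetas):
    if len(theta_n) < len(x):
        print('n = %.1f: first zero near x = %.4f' % (n_val, x[len(theta_n) - 1]))
    else:
        print('n = %.1f: no zero found on the integration interval' % n_val)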
| gpl-3.0 |
setten/pymatgen | pymatgen/phonon/plotter.py | 6 | 15693 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import logging
from collections import OrderedDict
import numpy as np
from monty.json import jsanitize
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.util.plotting import pretty_plot
from pymatgen.electronic_structure.plotter import plot_brillouin_zone
"""
This module implements plotter for DOS and band structure.
"""
logger = logging.getLogger(__name__)
class PhononDosPlotter(object):
"""
Class for plotting phonon DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = PhononDosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompletePhononDos.get_element_dos().
Args:
stack: Whether to plot the DOS as a stacked area graph
key_sort_func: function used to sort the dos_dict keys.
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
def __init__(self, stack=False, sigma=None):
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
PhononDos object
"""
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'frequencies':..,
'densities': ...}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
import prettyplotlib as ppl
from prettyplotlib import brewer2mpl
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
colors = brewer2mpl.get_map('Set1', 'qualitative', ncolors).mpl_colors
y = None
alldensities = []
allfrequencies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of frequencies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
frequencies = dos['frequencies']
densities = dos['densities']
if y is None:
y = np.zeros(frequencies.shape)
if self.stack:
y += densities
newdens = y.copy()
else:
newdens = densities
allfrequencies.append(frequencies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allfrequencies.reverse()
allpts = []
for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
allpts.extend(list(zip(frequencies, densities)))
if self.stack:
plt.fill(frequencies, densities, color=colors[i % ncolors],
label=str(key))
else:
ppl.plot(frequencies, densities, color=colors[i % ncolors],
label=str(key), linewidth=3)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel('Frequencies (THz)')
plt.ylabel('Density of states')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.show()
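# Minimal usage sketch (illustrative, not part of the original module). It assumes a
# CompletePhononDos instance `complete_dos` obtained elsewhere; the element-resolved
# dict form is the one mentioned in the class docstring above:
#
#     plotter = PhononDosPlotter(sigma=0.05)
#     plotter.add_dos("Total DOS", complete_dos)
#     plotter.add_dos_dict(complete_dos.get_element_dos())
#     plotter.show(xlim=(-1, 25))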
class PhononBSPlotter(object):
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
"""
def __init__(self, bs):
if not isinstance(bs, PhononBandStructureSymmLine):
raise ValueError(
"PhononBSPlotter only works with PhononBandStructureSymmLine objects. "
"A PhononBandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize only plot the uniq values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][0], d=temp_ticks[i][1]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][0], d=temp_ticks[i][1]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
i=ticks['label'][i]))
else:
logger.debug("Adding a line at {d}"
" for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
return plt
def bs_plot_data(self):
"""
Get the data nicely formatted for a plot
Returns:
A dict of the following format:
ticks: A dict with the 'distances' at which there is a qpoint (the
x axis) and the labels (None if no label)
frequencies: A list (one element for each branch) of frequencies for
each qpoint: [branch][qpoint][mode]. The data is
stored by branch to facilitate the plotting
lattice: The reciprocal lattice.
"""
distance = []
frequency = []
ticks = self.get_ticks()
for b in self._bs.branches:
frequency.append([])
distance.append([self._bs.distance[j]
for j in range(b['start_index'],
b['end_index'] + 1)])
for i in range(self._nb_bands):
frequency[-1].append(
[self._bs.bands[i][j]
for j in range(b['start_index'], b['end_index'] + 1)])
return {'ticks': ticks, 'distances': distance, 'frequency': frequency,
'lattice': self._bs.lattice_rec.as_dict()}
def get_plot(self, ylim=None):
"""
Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
"""
plt = pretty_plot(12, 8)
from matplotlib import rc
import scipy.interpolate as scint
try:
rc('text', usetex=True)
        except Exception:
# Fall back on non Tex if errored.
rc('text', usetex=False)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['frequency'][d][i][j]
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
self._maketicks(plt)
# plot y=0 line
plt.axhline(0, linewidth=1, color='k')
# Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{Frequency\ (THz)}$'
plt.ylabel(ylabel, fontsize=30)
# X range (K)
# last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is not None:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show(self, ylim=None):
"""
Show the plot using matplotlib.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
"""
plt = self.get_plot(ylim)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(ylim=ylim)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
A dict with 'distance': a list of distance at which ticks should
be set and 'label': a list of label for each of those ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs.qpoints[0].label
previous_branch = self._bs.branches[0]['name']
for i, c in enumerate(self._bs.qpoints):
if c.label is not None:
tick_distance.append(self._bs.distance[i])
this_branch = None
for b in self._bs.branches:
if b['start_index'] <= i <= b['end_index']:
this_branch = b['name']
break
if c.label != previous_label \
and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
tick_labels.append(label0 + "$\\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
def plot_compare(self, other_plotter):
"""
plot two band structure for comparison. One is in red the other in blue.
The two band structures need to be defined on the same symmetry lines!
and the distance between symmetry lines is
the one of the band structure used to build the PhononBSPlotter
Args:
another PhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
"""
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig['distances']) != len(data['distances']):
raise ValueError('The two objects are not compatible.')
plt = self.get_plot()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[i] for e in data['frequency']][d],
'r-', linewidth=band_linewidth)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
# get labels and lines
labels = {}
for q in self._bs.qpoints:
if q.label:
labels[q.label] = q.frac_coords
lines = []
for b in self._bs.branches:
lines.append([self._bs.qpoints[b['start_index']].frac_coords,
self._bs.qpoints[b['end_index']].frac_coords])
plot_brillouin_zone(self._bs.lattice_rec, lines=lines, labels=labels)
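# Minimal usage sketch (illustrative, not part of the original module). It assumes a
# PhononBandStructureSymmLine instance `bs`, however it was obtained (e.g. through the
# phonopy io helpers):
#
#     plotter = PhononBSPlotter(bs)
#     plotter.save_plot("phonon_bs.eps", img_format="eps", ylim=(0, 25))
#     plotter.plot_brillouin()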
| mit |
delijati/pysimiam-simulator | gui/qt_plotwindow_mpl.py | 1 | 4547 | from PyQt4 import QtGui
from matplotlib.lines import Line2D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavToolbar
from matplotlib.figure import Figure
import numpy
from random import random
def get_color(color):
if color is None:
color = random()
if isinstance(color, str):
if color == 'random':
return (random(), random(), random())
elif color == 'black':
return (0, 0, 0)
elif color == 'blue':
return (0, 0, 1)
elif color == 'red':
return (1, 0, 0)
elif color == 'green':
return (0, 1, 0)
elif isinstance(color, tuple) or isinstance(color, list):
if sum(color) >= 4.0:
return [c / 255. for c in color]
else:
return color
else:
color = float(color)
return (color, color, color)
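# A few illustrative values (added): get_color('red') returns (1, 0, 0); an RGB triple
# such as (128, 128, 0) sums to more than 4 and is rescaled to [0, 1], giving roughly
# (0.5, 0.5, 0.0); a plain scalar like 0.5 becomes the gray level (0.5, 0.5, 0.5).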
class PlotVariable:
"""
A plot variable corresponds to one curve on the plot.
It keeps track of the generating expression and of the
values of the expression over time.
"""
def __init__(self, label, expression, axes, color=None):
self.expression = expression
#self.xdata = []
#self.ydata = []
self.ymax = float("-inf")
self.ymin = float("inf")
self.curve = Line2D([], [])
self.curve.set_label(label)
self.curve.set_color(get_color(color))
axes.add_line(self.curve)
def add_point(self, x, y):
self.curve.set_xdata(numpy.append(self.curve.get_xdata(), x))
self.curve.set_ydata(numpy.append(self.curve.get_ydata(), y))
# self.xdata.append(x)
# self.ydata.append(y)
        # track both bounds independently: with an elif, a monotonically
        # increasing curve would never update ymin
        if y > self.ymax:
            self.ymax = y
        if y < self.ymin:
            self.ymin = y
#self.curve.set_data(self.xdata, self.ydata)
def clear_data(self):
#self.xdata = []
#self.ydata = []
self.curve.set_data([], [])
class Plot:
"""
The plot follows one or more variables through time.
It keeps track of the variables.
"""
def __init__(self, axes):
self.axes = axes
self.variables = []
def add_curve(self, label, expression, color=None):
self.variables.append(
PlotVariable(label, expression, self.axes, color))
self.axes.legend().draggable()
def add_data(self, data):
for variable in self.variables:
if variable.expression not in data:
print("No value for {}".format(variable.expression))
else:
variable.add_point(data['time'], data[variable.expression])
ymin = min([v.ymin for v in self.variables])
ymax = max([v.ymax for v in self.variables])
# Add 5% axis margins
drange = ymax - ymin
if drange > 0:
ymin -= 0.05 * drange
ymax += 0.05 * drange
self.axes.set_ylim(ymin, ymax)
def clear_data(self):
for v in self.variables:
v.clear_data()
class PlotWindow(QtGui.QWidget):
"""
The window consists of a figure with a nav toolbar and subplots.
It keeps track of all subplots
"""
def __init__(self):
QtGui.QWidget.__init__(self)
self.plots = []
self.figure = Figure()
vlayout = QtGui.QVBoxLayout(self)
canvas = FigureCanvas(self.figure)
canvas.setParent(self)
tbar = NavToolbar(canvas, self)
vlayout.addWidget(tbar)
vlayout.addWidget(canvas)
# Slots
def clear_data(self):
for plot in self.plots:
plot.clear_data()
def add_plot(self):
"""Add a new subplot with a curve given by expression"""
n = len(self.plots)
if n > 0:
for i, plot in enumerate(self.plots):
plot.axes.change_geometry(n + 1, 1, i + 1)
axes = self.figure.add_subplot(
n + 1, 1, n + 1, sharex=self.plots[0].axes)
else:
axes = self.figure.add_subplot(111)
# axes.legend()
self.plots.append(Plot(axes))
self.figure.canvas.draw()
return self.plots[-1]
def add_data(self, data):
for plot in self.plots:
plot.add_data(data)
plot.axes.set_xlim(right=data['time'])
if data['time'] > 10:
plot.axes.set_xlim(left=data['time'] - 10)
self.figure.canvas.draw()
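# Minimal usage sketch (illustrative, not part of the original module); the expression
# string passed to add_curve is simply the key that must later appear in the data dict
# handed to add_data, alongside the mandatory 'time' key:
#
#     app = QtGui.QApplication([])
#     window = PlotWindow()
#     plot = window.add_plot()
#     plot.add_curve("x position", "robot.pose.x", color='blue')
#     window.add_data({'time': 0.1, 'robot.pose.x': 0.25})
#     window.show()
#     app.exec_()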
| gpl-2.0 |
GregorCH/ipet | test/ValidationTest.py | 1 | 6585 | '''
Created on 25.01.2018
@author: gregor
'''
import unittest
from ipet.validation import Validation
from ipet.Key import ObjectiveSenseCode as osc
from ipet import Key
import numpy
import pandas as pd
from ipet.Key import SolverStatusCodes as ssc
from ipet.Key import ProblemStatusCodes as psc
from ipet.misc import getInfinity as infty
# infeasible instances
inf_good = "inf_good"
inf_bad = "inf_bad"
inf_time = "inf_time"
# feasible instances (it is bad to state infeasible)
feas_good = "feas_good"
feas_bad = "feas_bad"
# instances for which an optimal solution is known
opt_good = "opt_good"
opt_tol = "opt_tol"
opt_bad = "opt_bad"
# best instances for which also a dual bound is known
best_good = "best_good"
best_pbad = "best_pbad"
best_dbad = "best_dbad"
# instances for which no reference exists for which both solvers consistently report infeasibility
both_infeasible = "both_infeasible"
# instance that was reported infeasible by one method
one_says_infeasible = "one_says_infeasible"
# instances that crashed
opt_abort = "opt_abort"
opt_readerror = "opt_readerror"
# an instance that is partially inconsistent
part_inconsistent = "part_inconsistent"
class ValidationTest(unittest.TestCase):
def testBoundReplacement(self):
"""Test if Null values are replaced correctly
"""
v = Validation()
inftys = [(v.getDbValue, osc.MAXIMIZE),
(v.getPbValue, osc.MINIMIZE)]
neginftys = [(v.getDbValue, osc.MINIMIZE),
(v.getPbValue, osc.MAXIMIZE)]
for m, s in inftys:
self.assertEqual(m(None, s), infty(), "Should be inf")
for m, s in neginftys:
self.assertEqual(m(None, s), -infty(), "Should be negative inf")
def compareValidationStatus(self, d : pd.DataFrame, v : Validation):
vstatus = d.apply(v.validateSeries, axis = 1)
# compare validation status to expected status codes
self.assertTrue(numpy.all(vstatus == d.Status),
"Not matching validation status codes:\n{}"
"\nData:\n"
"{}".format(vstatus[vstatus != d.Status], d[vstatus != d.Status])
)
def testInstanceData(self):
"""
test some fake instances
"""
d = pd.DataFrame(
[ # ProblemName PrimalBound DualBound Objsense SolverStatus Status
(inf_good, None, None, osc.MINIMIZE, ssc.Infeasible, psc.Ok),
(inf_bad, 5, 5, osc.MINIMIZE, ssc.Optimal, psc.FailSolOnInfeasibleInstance),
(inf_time, None, None, osc.MINIMIZE, ssc.TimeLimit, psc.TimeLimit),
(feas_good, 3, None, osc.MAXIMIZE, ssc.MemoryLimit, psc.MemoryLimit),
(feas_bad, 3, None, osc.MAXIMIZE, ssc.Infeasible, psc.FailDualBound),
(opt_good, 10, 10, osc.MAXIMIZE, ssc.Optimal, psc.Ok),
(opt_bad, 9, 9, osc.MAXIMIZE, ssc.Optimal, psc.FailDualBound),
(opt_tol, 10 - 1e-5, 10 - 1e-5, osc.MAXIMIZE, ssc.Optimal, psc.Ok),
(best_good, 105, 85, osc.MINIMIZE, ssc.NodeLimit, psc.NodeLimit),
(best_dbad, 105, 103, osc.MINIMIZE, ssc.NodeLimit, psc.FailDualBound),
(best_pbad, 85, 85, osc.MINIMIZE, ssc.Optimal, psc.FailObjectiveValue),
(opt_abort, None, None, osc.MINIMIZE, ssc.Crashed, psc.FailAbort),
(opt_readerror, None, None, osc.MINIMIZE, ssc.Readerror, psc.FailReaderror),
],
columns = [Key.ProblemName, Key.PrimalBound, Key.DualBound, Key.ObjectiveSense, Key.SolverStatus, "Status"])
v = Validation()
v.referencedict = {
inf_good : (Validation.__infeas__, None),
inf_bad : (Validation.__infeas__, None),
inf_time : (Validation.__infeas__, None),
feas_good : (Validation.__feas__, Validation.__feas__),
feas_bad : (Validation.__feas__, Validation.__feas__),
opt_good : (10, 10),
opt_bad : (10, 10),
opt_tol : (10, 10),
best_good : (100, 90),
best_pbad : (100, 90),
best_dbad : (100, 90),
opt_abort : (1, 1),
opt_readerror : (1, 1)
}
self.compareValidationStatus(d, v)
def testInconsistencydetection(self):
"""test if inconsistent primal and dual bounds are detected well.
"""
d = pd.DataFrame(
[
(opt_good, 100, 90, osc.MINIMIZE, ssc.TimeLimit, psc.TimeLimit),
(opt_good, 95, 85, osc.MINIMIZE, ssc.TimeLimit, psc.TimeLimit),
(opt_bad, 100, 90, osc.MINIMIZE, ssc.TimeLimit, psc.FailInconsistent),
(opt_bad, 89, 89, osc.MINIMIZE, ssc.Optimal, psc.FailInconsistent),
(part_inconsistent, 12, 12, osc.MINIMIZE, ssc.Optimal, psc.FailDualBound),
(part_inconsistent, 10, 10, osc.MINIMIZE, ssc.Optimal, psc.FailInconsistent),
(part_inconsistent, 9, 9, osc.MINIMIZE, ssc.Optimal, psc.FailInconsistent),
(both_infeasible, numpy.nan, numpy.nan, osc.MAXIMIZE, ssc.Infeasible, psc.Ok),
(both_infeasible, numpy.nan, numpy.nan, osc.MAXIMIZE, ssc.Infeasible, psc.Ok),
(one_says_infeasible, numpy.nan, numpy.nan, osc.MINIMIZE, ssc.Infeasible, psc.FailInconsistent),
(one_says_infeasible, 1, 1, osc.MINIMIZE, ssc.Optimal, psc.FailInconsistent),
(one_says_infeasible, 3, 0, osc.MINIMIZE, ssc.TimeLimit, psc.FailInconsistent)
],
columns = [Key.ProblemName, Key.PrimalBound, Key.DualBound, Key.ObjectiveSense, Key.SolverStatus, "Status"])
v = Validation()
v.referencedict = { part_inconsistent : (10, 0) }
v.collectInconsistencies(d)
self.assertNotIn(opt_good, v.inconsistentset, "{} wrongly appears as inconsistent".format(opt_good))
self.assertNotIn(both_infeasible, v.inconsistentset, "{} wrongly appears as inconsistent".format(both_infeasible))
self.assertIn(opt_bad, v.inconsistentset, "{} should be inconsistent".format(opt_bad))
self.assertIn(part_inconsistent, v.inconsistentset, "{} should be inconsistent".format(part_inconsistent))
self.assertIn(one_says_infeasible, v.inconsistentset, "{} should be inconsistent".format(one_says_infeasible))
self.compareValidationStatus(d, v)
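# Illustrative sketch (added, not part of the original tests): the same validation flow
# applied outside of a unit test to a results table `results_df` with the columns used
# above -- configure a reference dictionary, scan for inconsistencies between runs of the
# same instance, then derive one problem status per row:
#
#     v = Validation()
#     v.referencedict = {"instance1": (10, 10), "instance2": (Validation.__infeas__, None)}
#     v.collectInconsistencies(results_df)
#     results_df["Status"] = results_df.apply(v.validateSeries, axis=1)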
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| mit |
pypot/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
Euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
greytip/data-science-utils | DataExplorerTemplate.py | 1 | 2574 |
# coding: utf-8
# In[16]:
# Custom libraries
from datascienceutils import plotter
from datascienceutils import analyze
# Standard libraries
import json
get_ipython().magic('matplotlib inline')
import datetime
import numpy as np
import pandas as pd
import random
from sklearn import cross_validation
from sklearn import metrics
from bokeh.plotting import figure, show, output_file, output_notebook, ColumnDataSource
from bokeh.charts import Histogram
import bokeh
output_notebook(bokeh.resources.INLINE)
from sqlalchemy import create_engine
# In[17]:
irisDf = pd.read_csv('./data/Iris.csv')
# Sample Timeseries picked from here https://www.backblaze.com/b2/hard-drive-test-data.html
hdd2013Df = pd.read_csv('./data/hdd_2013-11-26.csv')
# In[18]:
# Create classes for showing off correlation_analyze's heatmapping ability
def createClasses(x):
rdm = random.random()
if rdm < 0.3:
return 'A'
    elif rdm < 0.6:
return 'B'
else:
return 'C'
irisDf['Class'] = irisDf['Species'].apply(createClasses)
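# Equivalent vectorised one-liner (sketch, reproducing the same 0.3/0.3/0.4 split):
# irisDf['Class'] = np.random.choice(list('ABC'), size=len(irisDf), p=[0.3, 0.3, 0.4])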
# In[19]:
irisDf.describe()
# In[20]:
irisDf.head()
# In[21]:
irisDf.corr()
# In[22]:
irisDf.select_dtypes(include=[np.number]).columns
# In[23]:
analyze.correlation_analyze(irisDf, exclude_columns='Id',
categories=['Species', 'Class'],
measures=['count', 'SepalLengthCm','SepalWidthCm',
'PetalLengthCm', 'PetalWidthCm'])
# In[24]:
analyze.dist_analyze(irisDf)
# In[25]:
analyze.dist_analyze(irisDf, 'SepalLengthCm')
# In[26]:
analyze.regression_analyze(irisDf, 'SepalLengthCm', 'SepalWidthCm')
# In[27]:
target = irisDf.Species
irisDf.drop(['Species', 'Class'], 1, inplace=True)
# In[28]:
irisDf.head()
# In[29]:
analyze.silhouette_analyze(irisDf, cluster_type='KMeans')
# In[30]:
analyze.silhouette_analyze(irisDf, cluster_type='dbscan')
# In[ ]:
analyze.silhouette_analyze(irisDf, cluster_type='spectral')
# In[ ]:
analyze.silhouette_analyze(irisDf, cluster_type='birch')
# In[ ]:
#analyze.som_analyze(df, (10,10), algo_type='som')
# In[31]:
hdd2013Df.fillna(value=0, inplace=True)
hdd2013Df.describe()
# In[32]:
hdd2013Df.head()
# In[33]:
hdd2013Df['date'] = hdd2013Df['date'].astype('datetime64[ns]')
# In[34]:
hdd2013Df['date'] = [each + datetime.timedelta(0, i*45) for i, each in enumerate(hdd2013Df.date)]
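# The line above offsets each row's timestamp by i*45 seconds, so the single-day
# snapshot gets a strictly increasing time index for the seasonal analysis below.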
# In[35]:
analyze.time_series_analysis(hdd2013Df, timeCol='date', valueCol='smart_1_raw', seasonal={'freq': '30s'})
# In[ ]:
| gpl-3.0 |
kashif/scikit-learn | sklearn/tests/test_learning_curve.py | 59 | 10869 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score becomes better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
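        # e.g. with n_max_train_sizes=20 and 10 samples seen so far, the
        # training score is 2 - 10/20 = 1.5 and the test score is 10/20 = 0.5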
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/type1font.py | 8 | 12515 | """
This module contains a class representing a Type 1 font.
This version reads pfa and pfb files and splits them for embedding in
pdf files. It also supports SlantFont and ExtendFont transformations,
similarly to pdfTeX and friends. There is no support yet for
subsetting.
Usage::
>>> font = Type1Font(filename)
>>> clear_part, encrypted_part, finale = font.parts
>>> slanted_font = font.transform({'slant': 0.167})
>>> extended_font = font.transform({'extend': 1.2})
Sources:
* Adobe Technical Note #5040, Supporting Downloadable PostScript
Language Fonts.
* Adobe Type 1 Font Format, Adobe Systems Incorporated, third printing,
v1.1, 1993. ISBN 0-201-57044-0.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six import unichr
import binascii
import io
import itertools
import numpy as np
import re
import struct
import sys
if six.PY3:
def ord(x):
return x
class Type1Font(object):
"""
A class representing a Type-1 font, for use by backends.
.. attribute:: parts
A 3-tuple of the cleartext part, the encrypted part, and the
finale of zeros.
.. attribute:: prop
A dictionary of font properties.
"""
__slots__ = ('parts', 'prop')
def __init__(self, input):
"""
Initialize a Type-1 font. *input* can be either the file name of
a pfb file or a 3-tuple of already-decoded Type-1 font parts.
"""
if isinstance(input, tuple) and len(input) == 3:
self.parts = input
else:
with open(input, 'rb') as file:
data = self._read(file)
self.parts = self._split(data)
self._parse()
def _read(self, file):
"""
Read the font from a file, decoding into usable parts.
"""
rawdata = file.read()
if not rawdata.startswith(b'\x80'):
return rawdata
data = b''
while len(rawdata) > 0:
if not rawdata.startswith(b'\x80'):
raise RuntimeError('Broken pfb file (expected byte 128, '
'got %d)' % ord(rawdata[0]))
type = ord(rawdata[1])
if type in (1, 2):
length, = struct.unpack(str('<i'), rawdata[2:6])
segment = rawdata[6:6 + length]
rawdata = rawdata[6 + length:]
if type == 1: # ASCII text: include verbatim
data += segment
elif type == 2: # binary data: encode in hexadecimal
data += binascii.hexlify(segment)
elif type == 3: # end of file
break
else:
raise RuntimeError('Unknown segment type %d in pfb file' %
type)
return data
def _split(self, data):
"""
Split the Type 1 font into its three main parts.
        The three parts are: (1) the cleartext part, which ends in an
eexec operator; (2) the encrypted part; (3) the fixed part,
which contains 512 ASCII zeros possibly divided on various
lines, a cleartomark operator, and possibly something else.
"""
# Cleartext part: just find the eexec and skip whitespace
idx = data.index(b'eexec')
idx += len(b'eexec')
while data[idx] in b' \t\r\n':
idx += 1
len1 = idx
# Encrypted part: find the cleartomark operator and count
# zeros backward
idx = data.rindex(b'cleartomark') - 1
zeros = 512
while zeros and data[idx] in b'0' or data[idx] in b'\r\n':
if data[idx] in b'0':
zeros -= 1
idx -= 1
if zeros:
raise RuntimeError('Insufficiently many zeros in Type 1 font')
# Convert encrypted part to binary (if we read a pfb file, we
# may end up converting binary to hexadecimal to binary again;
# but if we read a pfa file, this part is already in hex, and
# I am not quite sure if even the pfb format guarantees that
# it will be in binary).
binary = binascii.unhexlify(data[len1:idx+1])
return data[:len1], binary, data[idx+1:]
_whitespace_re = re.compile(br'[\0\t\r\014\n ]+')
_token_re = re.compile(br'/{0,2}[^]\0\t\r\v\n ()<>{}/%[]+')
_comment_re = re.compile(br'%[^\r\n\v]*')
_instring_re = re.compile(br'[()\\]')
# token types, compared via object identity (poor man's enum)
_whitespace = object()
_name = object()
_string = object()
_delimiter = object()
_number = object()
@classmethod
def _tokens(cls, text):
"""
A PostScript tokenizer. Yield (token, value) pairs such as
(cls._whitespace, ' ') or (cls._name, '/Foobar').
"""
pos = 0
while pos < len(text):
match = (cls._comment_re.match(text[pos:]) or
cls._whitespace_re.match(text[pos:]))
if match:
yield (cls._whitespace, match.group())
pos += match.end()
elif text[pos] == b'(':
start = pos
pos += 1
depth = 1
while depth:
match = cls._instring_re.search(text[pos:])
if match is None:
return
pos += match.end()
if match.group() == b'(':
depth += 1
elif match.group() == b')':
depth -= 1
else: # a backslash - skip the next character
pos += 1
yield (cls._string, text[start:pos])
elif text[pos:pos + 2] in (b'<<', b'>>'):
yield (cls._delimiter, text[pos:pos + 2])
pos += 2
elif text[pos] == b'<':
start = pos
pos += text[pos:].index(b'>')
yield (cls._string, text[start:pos])
else:
match = cls._token_re.match(text[pos:])
if match:
try:
float(match.group())
yield (cls._number, match.group())
except ValueError:
yield (cls._name, match.group())
pos += match.end()
else:
yield (cls._delimiter, text[pos:pos + 1])
pos += 1
def _parse(self):
"""
Find the values of various font properties. This limited kind
of parsing is described in Chapter 10 "Adobe Type Manager
Compatibility" of the Type-1 spec.
"""
# Start with reasonable defaults
prop = {'weight': 'Regular', 'ItalicAngle': 0.0, 'isFixedPitch': False,
'UnderlinePosition': -100, 'UnderlineThickness': 50}
filtered = ((token, value)
for token, value in self._tokens(self.parts[0])
if token is not self._whitespace)
# The spec calls this an ASCII format; in Python 2.x we could
# just treat the strings and names as opaque bytes but let's
# turn them into proper Unicode, and be lenient in case of high bytes.
convert = lambda x: x.decode('ascii', 'replace')
for token, value in filtered:
if token is self._name and value.startswith(b'/'):
key = convert(value[1:])
token, value = next(filtered)
if token is self._name:
if value in (b'true', b'false'):
value = value == b'true'
else:
value = convert(value.lstrip(b'/'))
elif token is self._string:
value = convert(value.lstrip(b'(').rstrip(b')'))
elif token is self._number:
if b'.' in value:
value = float(value)
else:
value = int(value)
else: # more complicated value such as an array
value = None
if key != 'FontInfo' and value is not None:
prop[key] = value
# Fill in the various *Name properties
if 'FontName' not in prop:
prop['FontName'] = (prop.get('FullName') or
prop.get('FamilyName') or
'Unknown')
if 'FullName' not in prop:
prop['FullName'] = prop['FontName']
if 'FamilyName' not in prop:
extras = r'(?i)([ -](regular|plain|italic|oblique|(semi)?bold|(ultra)?light|extra|condensed))+$'
prop['FamilyName'] = re.sub(extras, '', prop['FullName'])
self.prop = prop
@classmethod
def _transformer(cls, tokens, slant, extend):
def fontname(name):
result = name
if slant:
result += b'_Slant_' + str(int(1000 * slant)).encode('latin-1')
if extend != 1.0:
result += b'_Extend_' + str(int(1000 * extend)).encode('latin-1')
return result
def italicangle(angle):
return str(float(angle) - np.arctan(slant) / np.pi * 180).encode('latin-1')
def fontmatrix(array):
array = array.lstrip(b'[').rstrip(b']').strip().split()
array = [float(x) for x in array]
oldmatrix = np.eye(3, 3)
oldmatrix[0:3, 0] = array[::2]
oldmatrix[0:3, 1] = array[1::2]
modifier = np.array([[extend, 0, 0],
[slant, 1, 0],
[0, 0, 1]])
newmatrix = np.dot(modifier, oldmatrix)
array[::2] = newmatrix[0:3, 0]
array[1::2] = newmatrix[0:3, 1]
as_string = u'[' + u' '.join(str(x) for x in array) + u']'
return as_string.encode('latin-1')
def replace(fun):
def replacer(tokens):
token, value = next(tokens) # name, e.g., /FontMatrix
yield bytes(value)
token, value = next(tokens) # possible whitespace
while token is cls._whitespace:
yield bytes(value)
token, value = next(tokens)
if value != b'[': # name/number/etc.
yield bytes(fun(value))
else: # array, e.g., [1 2 3]
result = b''
while value != b']':
result += value
token, value = next(tokens)
result += value
yield fun(result)
return replacer
def suppress(tokens):
for x in itertools.takewhile(lambda x: x[1] != b'def', tokens):
pass
yield b''
table = {b'/FontName': replace(fontname),
b'/ItalicAngle': replace(italicangle),
b'/FontMatrix': replace(fontmatrix),
b'/UniqueID': suppress}
while True:
token, value = next(tokens)
if token is cls._name and value in table:
for value in table[value](itertools.chain([(token, value)],
tokens)):
yield value
else:
yield value
def transform(self, effects):
"""
Transform the font by slanting or extending. *effects* should
be a dict where ``effects['slant']`` is the tangent of the
angle that the font is to be slanted to the right (so negative
values slant to the left) and ``effects['extend']`` is the
multiplier by which the font is to be extended (so values less
than 1.0 condense). Returns a new :class:`Type1Font` object.
"""
with io.BytesIO() as buffer:
tokenizer = self._tokens(self.parts[0])
transformed = self._transformer(tokenizer,
slant=effects.get('slant', 0.0),
extend=effects.get('extend', 1.0))
list(map(buffer.write, transformed))
return Type1Font((buffer.getvalue(), self.parts[1], self.parts[2]))
| mit |
giorgiop/scikit-learn | sklearn/neighbors/base.py | 16 | 30646 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
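        # (e.g. a distance row [0., 2., 4.] becomes weights [1., 0., 0.])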
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
            # A tree approach is better for a small number of neighbors,
            # and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: When the number of duplicates is more
            # than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
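            # (e.g. if a sample has several exact duplicates, the sample itself
            # may not appear in its own neighbor list at all; masking column 0
            # for those rows keeps the reshape below at a consistent width)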
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=self.n_jobs, squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
n_jobs=self.n_jobs,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
lukecwik/incubator-beam | sdks/python/setup.py | 1 | 10705 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK for Python setup file."""
from __future__ import absolute_import
from __future__ import print_function
import os
import platform
import sys
import warnings
from distutils.errors import DistutilsError
from distutils.version import StrictVersion
# Pylint and isort disagree here.
# pylint: disable=ungrouped-imports
import setuptools
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
from pkg_resources import normalize_path
from pkg_resources import to_filename
from setuptools import Command
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.test import test
class mypy(Command):
user_options = []
def initialize_options(self):
"""Abstract method that is required to be overwritten"""
def finalize_options(self):
"""Abstract method that is required to be overwritten"""
def get_project_path(self):
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
project_path = normalize_path(ei_cmd.egg_base)
return os.path.join(project_path, to_filename(ei_cmd.egg_name))
def run(self):
import subprocess
args = ['mypy', self.get_project_path()]
result = subprocess.call(args)
if result != 0:
raise DistutilsError("mypy exited with status %d" % result)
def get_version():
global_names = {}
exec( # pylint: disable=exec-used
open(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'apache_beam/version.py')
).read(),
global_names
)
return global_names['__version__']
PACKAGE_NAME = 'apache-beam'
PACKAGE_VERSION = get_version()
PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python'
PACKAGE_URL = 'https://beam.apache.org'
PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam'
PACKAGE_AUTHOR = 'Apache Software Foundation'
PACKAGE_EMAIL = '[email protected]'
PACKAGE_KEYWORDS = 'apache beam'
PACKAGE_LONG_DESCRIPTION = '''
Apache Beam is a unified programming model for both batch and streaming
data processing, enabling efficient execution across diverse distributed
execution engines and providing extensibility points for connecting to
different technologies and user communities.
'''
REQUIRED_PIP_VERSION = '7.0.0'
_PIP_VERSION = get_distribution('pip').version
if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
warnings.warn(
"You are using version {0} of pip. " \
"However, version {1} is recommended.".format(
_PIP_VERSION, REQUIRED_PIP_VERSION
)
)
REQUIRED_CYTHON_VERSION = '0.28.1'
try:
_CYTHON_VERSION = get_distribution('cython').version
if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
warnings.warn(
"You are using version {0} of cython. " \
"However, version {1} is recommended.".format(
_CYTHON_VERSION, REQUIRED_CYTHON_VERSION
)
)
except DistributionNotFound:
# do nothing if Cython is not installed
pass
# Currently all compiled modules are optional (for performance only).
if platform.system() == 'Windows':
# Windows doesn't always provide int64_t.
cythonize = lambda *args, **kwargs: []
else:
try:
# pylint: disable=wrong-import-position
from Cython.Build import cythonize
except ImportError:
cythonize = lambda *args, **kwargs: []
REQUIRED_PACKAGES = [
# Apache Avro does not follow semantic versioning, so we should not auto
# upgrade on minor versions. Due to AVRO-2429, Dataflow still
# requires Avro 1.8.x.
'avro>=1.8.1,<1.10.0; python_version < "3.0"',
# Avro 1.9.2 for python3 was broken. The issue was fixed in version 1.9.2.1
'avro-python3>=1.8.1,!=1.9.2,<1.10.0; python_version >= "3.0"',
'crcmod>=1.7,<2.0',
# Dill doesn't have forwards-compatibility guarantees within minor version.
# Pickles created with a new version of dill may not unpickle using older
# version of dill. It is best to use the same version of dill on client and
# server, therefore list of allowed versions is very narrow.
# See: https://github.com/uqfoundation/dill/issues/341.
'dill>=0.3.1.1,<0.3.2',
'fastavro>=0.21.4,<0.22',
'funcsigs>=1.0.2,<2; python_version < "3.0"',
'future>=0.16.0,<1.0.0',
'futures>=3.2.0,<4.0.0; python_version < "3.0"',
'grpcio>=1.12.1,<2',
'hdfs>=2.1.0,<3.0.0',
'httplib2>=0.8,<=0.12.0',
'mock>=1.0.1,<3.0.0',
'numpy>=1.14.3,<2',
'pymongo>=3.8.0,<4.0.0',
'oauth2client>=2.0.1,<4',
'protobuf>=3.5.0.post1,<4',
# [BEAM-6287] pyarrow is not supported on Windows for Python 2
('pyarrow>=0.15.1,<0.17.0; python_version >= "3.0" or '
'platform_system != "Windows"'),
'pydot>=1.2.0,<2',
'python-dateutil>=2.8.0,<3',
'pytz>=2018.3',
# [BEAM-5628] Beam VCF IO is not supported in Python 3.
'pyvcf>=0.6.8,<0.7.0; python_version < "3.0"',
# fixes and additions have been made since typing 3.5
'typing>=3.7.0,<3.8.0; python_version < "3.5.3"',
'typing-extensions>=3.7.0,<3.8.0',
]
# [BEAM-8181] pyarrow cannot be installed on 32-bit Windows platforms.
if sys.platform == 'win32' and sys.maxsize <= 2**32:
REQUIRED_PACKAGES = [
p for p in REQUIRED_PACKAGES if not p.startswith('pyarrow')
]
REQUIRED_TEST_PACKAGES = [
'freezegun>=0.3.12',
'nose>=1.3.7',
'nose_xunitmp>=0.4.1',
'pandas>=0.23.4,<0.25',
'parameterized>=0.7.1,<0.8.0',
# pyhamcrest==1.10.0 doesn't work on Py2. Beam still supports Py2.
# See: https://github.com/hamcrest/PyHamcrest/issues/131.
'pyhamcrest>=1.9,!=1.10.0,<2.0.0',
'pyyaml>=3.12,<6.0.0',
'requests_mock>=1.7,<2.0',
'tenacity>=5.0.2,<6.0',
'pytest>=4.4.0,<5.0',
'pytest-xdist>=1.29.0,<2',
'pytest-timeout>=1.3.3,<2',
]
GCP_REQUIREMENTS = [
'cachetools>=3.1.0,<4',
'google-apitools>=0.5.28,<0.5.29',
'google-cloud-datastore>=1.7.1,<1.8.0',
'google-cloud-pubsub>=0.39.0,<1.1.0',
# GCP packages required by tests
'google-cloud-bigquery>=1.6.0,<=1.24.0',
'google-cloud-core>=0.28.1,<2',
'google-cloud-bigtable>=0.31.1,<1.1.0',
'google-cloud-spanner>=1.13.0,<1.14.0',
'grpcio-gcp>=0.2.2,<1',
# GCP Packages required by ML functionality
'google-cloud-dlp>=0.12.0,<=0.13.0',
'google-cloud-language>=1.3.0,<2',
'google-cloud-videointelligence>=1.8.0,<1.14.0',
'google-cloud-vision>=0.38.0,<0.43.0',
]
INTERACTIVE_BEAM = [
'facets-overview>=1.0.0,<2',
'ipython>=5.8.0,<8',
'timeloop>=1.0.2,<2',
]
AWS_REQUIREMENTS = [
'boto3 >=1.9'
]
# We must generate protos after setup_requires are installed.
def generate_protos_first(original_cmd):
try:
# See https://issues.apache.org/jira/browse/BEAM-2366
# pylint: disable=wrong-import-position
import gen_protos
class cmd(original_cmd, object):
def run(self):
gen_protos.generate_proto_files()
super(cmd, self).run()
return cmd
except ImportError:
warnings.warn("Could not import gen_protos, skipping proto generation.")
return original_cmd
python_requires = '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*'
if sys.version_info[0] == 2:
warnings.warn(
'You are using Apache Beam with Python 2. '
'New releases of Apache Beam will soon support Python 3 only.')
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=PACKAGE_DESCRIPTION,
long_description=PACKAGE_LONG_DESCRIPTION,
url=PACKAGE_URL,
download_url=PACKAGE_DOWNLOAD_URL,
author=PACKAGE_AUTHOR,
author_email=PACKAGE_EMAIL,
packages=setuptools.find_packages(),
package_data={'apache_beam': [
'*/*.pyx', '*/*/*.pyx', '*/*.pxd', '*/*/*.pxd', 'testing/data/*.yaml',
'portability/api/*.yaml']},
ext_modules=cythonize([
'apache_beam/**/*.pyx',
'apache_beam/coders/coder_impl.py',
'apache_beam/metrics/cells.py',
'apache_beam/metrics/execution.py',
'apache_beam/runners/common.py',
'apache_beam/runners/worker/logger.py',
'apache_beam/runners/worker/opcounters.py',
'apache_beam/runners/worker/operations.py',
'apache_beam/transforms/cy_combiners.py',
'apache_beam/utils/counters.py',
'apache_beam/utils/windowed_value.py',
]),
install_requires=REQUIRED_PACKAGES,
python_requires=python_requires,
test_suite='nose.collector',
# BEAM-8840: Do NOT use tests_require or setup_requires.
extras_require={
'docs': ['Sphinx>=1.5.2,<2.0'],
'test': REQUIRED_TEST_PACKAGES,
'gcp': GCP_REQUIREMENTS,
'interactive': INTERACTIVE_BEAM,
'aws': AWS_REQUIREMENTS
},
zip_safe=False,
# PyPI package information.
classifiers=[
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache License, Version 2.0',
keywords=PACKAGE_KEYWORDS,
entry_points={
'nose.plugins.0.10': [
'beam_test_plugin = test_config:BeamTestPlugin',
]},
cmdclass={
'build_py': generate_protos_first(build_py),
'develop': generate_protos_first(develop),
'egg_info': generate_protos_first(egg_info),
'test': generate_protos_first(test),
'mypy': generate_protos_first(mypy),
},
)
| apache-2.0 |
roebius/deeplearning_keras2 | nbs2/utils2.py | 2 | 4860 | import math, keras, datetime, pandas as pd, numpy as np, keras.backend as K, threading, json, re, collections
import tarfile, tensorflow as tf, matplotlib.pyplot as plt, xgboost, operator, random, pickle, glob, os, bcolz
import shutil, sklearn, functools, itertools, scipy
from PIL import Image
from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor
import matplotlib.patheffects as PathEffects
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.neighbors import NearestNeighbors, LSHForest
import IPython
from IPython.display import display, Audio
from numpy.random import normal
from gensim.models import word2vec
from keras.preprocessing.text import Tokenizer
#from nltk.tokenize import ToktokTokenizer, StanfordTokenizer # - changed for compatibility with conda-installed nltk
from nltk.tokenize import ToktokTokenizer # - changed for compatibility with conda-installed nltk
from nltk.tokenize.stanford import StanfordTokenizer # - changed for compatibility with conda-installed nltk
from functools import reduce
from itertools import chain
from tensorflow.python.framework import ops
#from tensorflow.contrib import rnn, legacy_seq2seq as seq2seq
from keras_tqdm import TQDMNotebookCallback
#from keras import initializations # Keras 1
from keras.applications.resnet50 import ResNet50, decode_predictions, conv_block, identity_block
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, Sequential
from keras.layers import *
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
np.set_printoptions(threshold=50, edgeitems=20)
def beep(): return Audio(filename='/home/jhoward/beep.mp3', autoplay=True)
def dump(obj, fname): pickle.dump(obj, open(fname, 'wb'))
def load(fname): return pickle.load(open(fname, 'rb'))
def limit_mem():
K.get_session().close()
cfg = K.tf.ConfigProto()
cfg.gpu_options.allow_growth = True
K.set_session(K.tf.Session(config=cfg))
def autolabel(plt, fmt='%.2f'):
rects = plt.patches
ax = rects[0].axes
y_bottom, y_top = ax.get_ylim()
y_height = y_top - y_bottom
for rect in rects:
height = rect.get_height()
if height / y_height > 0.95:
label_position = height - (y_height * 0.06)
else:
label_position = height + (y_height * 0.01)
txt = ax.text(rect.get_x() + rect.get_width()/2., label_position,
fmt % height, ha='center', va='bottom')
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='w')])
def column_chart(lbls, vals, val_lbls='%.2f'):
n = len(lbls)
p = plt.bar(np.arange(n), vals)
plt.xticks(np.arange(n), lbls)
if val_lbls: autolabel(p, val_lbls)
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname): return bcolz.open(fname)[:]
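# Typical round-trip (sketch): save_array('tmp.bc', np.arange(10)); load_array('tmp.bc')
# bcolz writes the array to a compressed on-disk directory named 'tmp.bc'.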
def load_glove(loc):
return (load_array(loc+'.dat'),
pickle.load(open(loc+'_words.pkl','rb'), encoding='latin1'),
pickle.load(open(loc+'_idx.pkl','rb'), encoding='latin1'))
def plot_multi(im, dim=(4,4), figsize=(6,6), **kwargs ):
plt.figure(figsize=figsize)
for i,img in enumerate(im):
plt.subplot(*dim, i+1)
plt.imshow(img, **kwargs)
plt.axis('off')
plt.tight_layout()
def plot_train(hist):
h = hist.history
if 'acc' in h:
meas='acc'
loc='lower right'
else:
meas='loss'
loc='upper right'
plt.plot(hist.history[meas])
plt.plot(hist.history['val_'+meas])
plt.title('model '+meas)
plt.ylabel(meas)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc=loc)
def fit_gen(gen, fn, eval_fn, nb_iter):
for i in range(nb_iter):
fn(*next(gen))
if i % (nb_iter//10) == 0: eval_fn()
def wrap_config(layer):
return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
def copy_layer(layer): return layer_from_config(wrap_config(layer))
def copy_layers(layers): return [copy_layer(layer) for layer in layers]
def copy_weights(from_layers, to_layers):
for from_layer,to_layer in zip(from_layers, to_layers):
to_layer.set_weights(from_layer.get_weights())
def copy_model(m):
res = Sequential(copy_layers(m.layers))
copy_weights(m.layers, res.layers)
return res
def insert_layer(model, new_layer, index):
res = Sequential()
for i,layer in enumerate(model.layers):
if i==index: res.add(new_layer)
copied = layer_from_config(wrap_config(layer))
res.add(copied)
copied.set_weights(layer.get_weights())
return res
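# Usage sketch (illustrative; assumes a built Keras Sequential model `m`):
#   m2 = copy_model(m)                      # independent copy with the same weights
#   m3 = insert_layer(m, Dropout(0.5), 3)   # rebuilt model with Dropout added at index 3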
| apache-2.0 |
qingkaikong/useful_script | python/sklearn/learning_sklearn/simple_classification.py | 1 | 2913 | from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn import preprocessing
import numpy as np
#get the dataset
iris = datasets.load_iris()
X_iris, y_iris = iris.data, iris.target
# Get dataset with only the first two attributes
X, y = X_iris[:, :2], y_iris
# Split the dataset into a training and a testing set
# Test set will be the 25% taken randomly
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.25, random_state=33)
print X_train.shape, y_train.shape
# Standardize the features
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#plot the data
import matplotlib.pyplot as plt
colors = ['red', 'greenyellow', 'blue']
for i in xrange(len(colors)):
xs = X_train[:, 0][y_train == i]
ys = X_train[:, 1][y_train == i]
plt.scatter(xs, ys, c=colors[i])
plt.legend(iris.target_names, scatterpoints = 1)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
#Using SGD
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier()
clf.fit(X_train, y_train)
#plot decision
x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() +.5
y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() +.5
xs = np.arange(x_min, x_max, 0.5)
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(10, 6)
for i in [0, 1, 2]:
axes[i].set_aspect('equal')
axes[i].set_title('Class '+ str(i) + ' versus the rest')
axes[i].set_xlabel('Sepal length')
axes[i].set_ylabel('Sepal width')
axes[i].set_xlim(x_min, x_max)
axes[i].set_ylim(y_min, y_max)
plt.sca(axes[i])
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train,cmap=plt.cm.prism)
ys = (-clf.intercept_[i] - xs * clf.coef_[i, 0]) / clf.coef_[i, 1]
plt.plot(xs, ys, hold=True)
plt.show()
#print out precision, recall, and F1-score
y_pred = clf.predict(X_test)
print metrics.classification_report(y_test, y_pred,target_names=iris.target_names)
#print out the confusion matrix: true classes are in rows, predicted classes
#in columns
print metrics.confusion_matrix(y_test, y_pred)
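# Example reading (hypothetical numbers): a first row of [8, 0, 0] would mean all 8
# setosa test samples were predicted as setosa; off-diagonal entries count the mistakes.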
#Using cross-validation
from sklearn.cross_validation import cross_val_score, KFold
from sklearn.pipeline import Pipeline
# create a composite estimator made by a pipeline of the
# standardization and the linear model
clf = Pipeline([
('scaler', preprocessing.StandardScaler()),
('linear_model', SGDClassifier())
])
# create a k-fold cross validation iterator of k=5 folds
cv = KFold(X.shape[0], 5, shuffle=True, random_state=33)
# by default the score used is the one returned by score
#method of the estimator (accuracy)
scores = cross_val_score(clf, X, y, cv=cv)
print scores
from scipy.stats import sem
def mean_score(scores):
return ("Mean score: {0:.3f} (+/-{1:.3f})").format(np.mean(scores), sem(scores))
print mean_score(scores)
| bsd-3-clause |
ContinuumIO/blaze | blaze/compute/csv.py | 3 | 2468 | from __future__ import absolute_import, division, print_function
import pandas
import os
from toolz import curry, concat
import pandas as pd
import numpy as np
from collections import Iterator, Iterable
from odo import into, Temp
from odo.backends.csv import CSV
from odo.backends.url import URL
from multipledispatch import MDNotImplementedError
import dask.dataframe as dd
from ..dispatch import dispatch
from ..expr import Expr, Head, ElemWise, Distinct, Symbol, Projection, Field
from ..expr.core import path
from ..utils import available_memory
from ..expr.split import split
from .core import compute
from ..expr.optimize import lean_projection
from .pmap import get_default_pmap
from warnings import warn
__all__ = ['optimize', 'pre_compute']
@dispatch(Expr, CSV)
def optimize(expr, _):
return lean_projection(expr) # This is handled in pre_compute
@dispatch(Expr, CSV)
def pre_compute(expr, data, comfortable_memory=None, chunksize=None, blocksize=None, **kwargs):
comfortable_memory = comfortable_memory or min(1e9, available_memory() / 4)
kwargs = dict()
# Chunk if the file is large
if os.path.getsize(data.path) > comfortable_memory:
do_chunk = True
if chunksize is not None:
warn("Deprecation warning: chunksize keyword renamed to blocksize")
blocksize = chunksize
if blocksize is not None:
kwargs['blocksize'] = blocksize
else:
do_chunk = False
# Insert projection into read_csv
oexpr = optimize(expr, data)
leaf = oexpr._leaves()[0]
pth = list(path(oexpr, leaf))
if len(pth) >= 2 and isinstance(pth[-2], (Projection, Field)):
# NOTE: FIXME: We pass the column names through `str` to workaround a
# PY2 Pandas bug with strings / unicode objects.
kwargs['usecols'] = list(map(str, pth[-2].fields))
if do_chunk:
return dd.read_csv(data.path, **kwargs)
else:
return into(pd.DataFrame, data, dshape=leaf.dshape, **kwargs)
@dispatch((Expr, Head), URL(CSV))
def pre_compute(expr, data, **kwargs):
return pre_compute(expr, into(Temp(CSV), data, **kwargs), **kwargs)
Cheap = (Head, ElemWise, Distinct, Symbol)
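# Expressions built only from these "cheap" node types can be answered by streaming the
# CSV in chunks, which is what the Head-specialised pre_compute below relies on.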
@dispatch(Head, CSV)
def pre_compute(expr, data, **kwargs):
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return into(Iterator, data, chunksize=10000, dshape=leaf.dshape)
else:
raise MDNotImplementedError()
| bsd-3-clause |
shakamunyi/tensorflow | tensorflow/examples/get_started/regression/imports85.py | 24 | 6638 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (tf.contrib.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
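# Usage sketch for dataset() (method names follow the tf.contrib.data release imported
# above; treat them as an assumption for other TF versions):
#   train, test = dataset()
#   train_batches = train.shuffle(1000).repeat().batch(64)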
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
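# Usage sketch:
#   (x_train, y_train), (x_test, y_test) = load_data(seed=0)
#   print(x_train.shape, y_train.shape)  # DataFrame of features, Series of prices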
| apache-2.0 |
geoscixyz/em_examples | em_examples/DC_Pseudosections.py | 1 | 22327 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from SimPEG import Mesh, Maps, SolverLU, Utils
import numpy as np
from SimPEG.EM.Static import DC
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import LogFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.interpolate import griddata, LinearNDInterpolator
import warnings
from ipywidgets import (
interactive, IntSlider, FloatSlider, FloatText, ToggleButtons, VBox
)
from .Base import widgetify
from SimPEG.Maps import IdentityMap
# only use this if you are sure things are working
warnings.filterwarnings('ignore')
class ParametricCircleLayerMap(IdentityMap):
slope = 1e-1
def __init__(self, mesh, logSigma=True):
assert mesh.dim == 2, (
"Working for a 2D mesh only right now. "
"But it isn't that hard to change.. :)"
)
IdentityMap.__init__(self, mesh)
# TODO: this should be done through a composition with and ExpMap
self.logSigma = logSigma
@property
def nP(self):
return 7
def _transform(self, m):
a = self.slope
sig1, sig2, sig3, x, zc, r, zh = m[0], m[1], m[2], m[3], m[4], m[5], m[6]
if self.logSigma:
sig1, sig2, sig3 = np.exp(sig1), np.exp(sig2), np.exp(sig3)
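        # builds a 3-unit model on the module-level `mesh` defined below (not self.mesh):
        # background sig1, a layer of sig2 below depth zh, and a circular body of sig3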
sigma = np.ones(mesh.nC) * sig1
sigma[mesh.gridCC[:,1]<zh] = sig2
blkind = Utils.ModelBuilder.getIndicesSphere(np.r_[x, zc], r, mesh.gridCC)
sigma[blkind]=sig3
return sigma
# Mesh, mapping can be globals
npad = 15
cs = 1.25
hx = [(cs, npad, -1.3), (cs, 100), (cs, npad, 1.3)]
hy = [(cs, npad, -1.3), (cs, 50)]
mesh = Mesh.TensorMesh([hx, hy], "CN")
circmap = ParametricCircleLayerMap(mesh)
circmap.slope = 1e5
mapping = circmap
dx = 5
xr = np.arange(-40, 41, dx)
dxr = np.diff(xr)
xmin = -40.
xmax = 40.
ymin = -40.
ymax = 5.
xylim = np.c_[[xmin, ymin], [xmax, ymax]]
indCC, meshcore = Utils.ExtractCoreMesh(xylim, mesh)
indx = (mesh.gridFx[:, 0] >= xmin) & (mesh.gridFx[:, 0] <= xmax) \
& (mesh.gridFx[:, 1] >= ymin) & (mesh.gridFx[:, 1] <= ymax)
indy = (mesh.gridFy[:, 0] >= xmin) & (mesh.gridFy[:, 0] <= xmax) \
& (mesh.gridFy[:, 1] >= ymin) & (mesh.gridFy[:, 1] <= ymax)
indF = np.concatenate((indx, indy))
def DC2Dsurvey(flag="PolePole"):
"""
        Function that defines a surface DC survey
:param str flag: Survey Type 'PoleDipole', 'DipoleDipole', 'DipolePole', 'PolePole'
"""
if flag == "PoleDipole":
ntx, nmax = xr.size-2, 8
elif flag == "DipolePole":
ntx, nmax = xr.size-2, 8
elif flag == "DipoleDipole":
ntx, nmax = xr.size-3, 8
elif flag == "PolePole":
ntx, nmax = xr.size-2, 8
else:
raise Exception('Not Implemented')
xzlocs = getPseudoLocs(xr, ntx, nmax, flag)
txList = []
zloc = -2.5
for i in range(ntx):
if flag == "PoleDipole":
A = np.r_[xr[i], zloc]
B = np.r_[mesh.vectorCCx.min(), zloc]
if i < ntx-nmax+1:
M = np.c_[xr[i+1:i+1+nmax], np.ones(nmax)*zloc]
N = np.c_[xr[i+2:i+2+nmax], np.ones(nmax)*zloc]
else:
M = np.c_[xr[i+1:ntx+1], np.ones(ntx-i)*zloc]
N = np.c_[xr[i+2:i+2+nmax], np.ones(ntx-i)*zloc]
elif flag == "DipolePole":
A = np.r_[xr[i], zloc]
B = np.r_[xr[i+1], zloc]
if i < ntx-nmax+1:
M = np.c_[xr[i+2:i+2+nmax], np.ones(nmax)*zloc]
N = np.c_[np.ones(nmax)*mesh.vectorCCx.max(),
np.ones(nmax)*zloc]
else:
M = np.c_[xr[i+2:ntx+2], np.ones(ntx-i)*zloc]
N = np.c_[np.ones(ntx-i)*mesh.vectorCCx.max(),
np.ones(ntx-i)*zloc]
elif flag == "DipoleDipole":
A = np.r_[xr[i], zloc]
B = np.r_[xr[i+1], zloc]
if i < ntx-nmax:
M = np.c_[xr[i+2:i+2+nmax],
np.ones(len(xr[i+2:i+2+nmax]))*zloc]
N = np.c_[xr[i+3:i+3+nmax],
np.ones(len(xr[i+3:i+3+nmax]))*zloc]
else:
M = np.c_[xr[i+2:len(xr)-1],
np.ones(len(xr[i+2:len(xr)-1]))*zloc]
N = np.c_[xr[i+3:len(xr)], np.ones(len(xr[i+3:len(xr)]))*zloc]
elif flag == "PolePole":
A = np.r_[xr[i], zloc]
B = np.r_[mesh.vectorCCx.min(), zloc]
if i < ntx-nmax+1:
M = np.c_[xr[i+2:i+2+nmax], np.ones(nmax)*zloc]
N = np.c_[np.ones(nmax)*mesh.vectorCCx.max(),
np.ones(nmax)*zloc]
else:
M = np.c_[xr[i+2:ntx+2], np.ones(ntx-i)*zloc]
N = np.c_[np.ones(ntx-i)*mesh.vectorCCx.max(),
np.ones(ntx-i)*zloc]
rx = DC.Rx.Dipole(M, N)
src = DC.Src.Dipole([rx], A, B)
txList.append(src)
survey = DC.Survey(txList)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
sigblk, sighalf, siglayer = 2e-2, 2e-3, 1e-3
xc, yc, r, zh = -15, -8, 4, -5
mtrue = np.r_[np.log(sighalf), np.log(siglayer), np.log(sigblk), xc, yc, r, zh]
dtrue = survey.dpred(mtrue)
perc = 0.1
floor = np.linalg.norm(dtrue)*1e-3
np.random.seed([1])
uncert = np.random.randn(survey.nD)*perc + floor
dobs = dtrue + uncert
return dobs, uncert, survey, xzlocs
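# Usage sketch: build a synthetic dipole-dipole dataset and inspect its pseudolocations
#   dobs, uncert, survey, xzlocs = DC2Dsurvey("DipoleDipole")
#   PseudoSectionWidget(survey, "DipoleDipole")  # interactive, intended for a notebook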
def getPseudoLocs(xr, ntx, nmax, flag="PoleDipole"):
"""
Compute the midpoint pseudolocation
for each Transmitter-Receiver pair of a survey
        :param numpy.array xr: electrode positions
        :param int ntx: number of transmitters
        :param int nmax: max number of receivers per source
:param str flag: Survey Type 'PoleDipole', 'DipoleDipole', 'DipolePole'
"""
xloc = []
yloc = []
for i in range(ntx):
if i < ntx-nmax+1:
if flag == 'DipoleDipole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
elif flag == 'PoleDipole':
txmid = xr[i]
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
elif flag == 'DipolePole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]
elif flag == 'PolePole':
txmid = xr[i]
rxmid = xr[i+1:i+1+nmax]
mid = (txmid+rxmid)*0.5
xloc.append(mid)
yloc.append(np.arange(nmax)+1.)
else:
if flag == 'DipoleDipole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
elif flag == 'PoleDipole':
txmid = xr[i]
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
elif flag == 'DipolePole':
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]
elif flag == 'PolePole':
txmid = xr[i]
rxmid = xr[i+1:ntx+1]
mid = (txmid+rxmid)*0.5
xloc.append(mid)
yloc.append(np.arange(mid.size)+1.)
xlocvec = np.hstack(xloc)
ylocvec = np.hstack(yloc)
return np.c_[xlocvec, ylocvec]
def PseudoSectionPlotfnc(i, j, survey, flag="PoleDipole"):
"""
Plot the Pseudolocation associated with source i and receiver j
:param int i: source index
:param int j: receiver index
:param SimPEG.survey survey: SimPEG survey object
:param str flag: Survey Type 'PoleDipole', 'DipoleDipole', 'DipolePole'
"""
matplotlib.rcParams['font.size'] = 14
nmax = 8
dx = 5
xr = np.arange(-40, 41, dx)
ntx = xr.size-2
dxr = np.diff(xr)
TxObj = survey.srcList
TxLoc = TxObj[i].loc
RxLoc = TxObj[i].rxList[0].locs
fig = plt.figure(figsize=(10, 3))
ax = fig.add_subplot(111, autoscale_on=False,
xlim=(xr.min()-5, xr.max()+5), ylim=(nmax+1, -2))
plt.plot(xr, np.zeros_like(xr), 'ko', markersize=4)
if flag == "PoleDipole":
plt.plot(TxLoc[0][0], np.zeros(1), 'rv', markersize=10)
ax.annotate('A', xy=(TxLoc[0][0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
else:
plt.plot([TxLoc[0][0], TxLoc[1][0]], np.zeros(2), 'rv', markersize=10)
ax.annotate('A', xy=(TxLoc[0][0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('B', xy=(TxLoc[1][0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
if i < ntx-nmax+1:
if flag in ["PoleDipole", "PolePole"]:
txmid = TxLoc[0][0]
else:
txmid = (TxLoc[0][0] + TxLoc[1][0])*0.5
MLoc = RxLoc[0][j]
NLoc = RxLoc[1][j]
if flag in ["DipolePole", "PolePole"]:
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
rxmid = MLoc[0]
else:
rxmid = (MLoc[0]+NLoc[0])*0.5
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
plt.plot(NLoc[0], np.zeros(1), 'b^', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('N', xy=(NLoc[0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
mid = (txmid+rxmid)*0.5
midSep = np.sqrt(np.square(txmid-rxmid))
plt.plot(txmid, np.zeros(1), 'ro')
plt.plot(rxmid, np.zeros(1), 'bo')
plt.plot(mid, midSep/2., 'go')
plt.plot(np.r_[txmid, mid], np.r_[0, midSep/2.], 'k:')
plt.plot(np.r_[rxmid, mid], np.r_[0, midSep/2.], 'k:')
else:
if flag in ["PoleDipole", "PolePole"]:
txmid = TxLoc[0][0]
else:
txmid = (TxLoc[0][0] + TxLoc[1][0])*0.5
MLoc = RxLoc[0][j]
NLoc = RxLoc[1][j]
if flag in ["DipolePole", "PolePole"]:
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
rxmid = MLoc[0]
else:
rxmid = (MLoc[0]+NLoc[0])*0.5
plt.plot(MLoc[0], np.zeros(1), 'bv', markersize=10)
plt.plot(NLoc[0], np.zeros(1), 'b^', markersize=10)
ax.annotate('M', xy=(MLoc[0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
ax.annotate('N', xy=(NLoc[0], np.zeros(1)), xycoords='data',
xytext=(-4.25, 7.5), textcoords='offset points')
mid = (txmid+rxmid)*0.5
plt.plot((txmid+rxmid)*0.5, np.arange(mid.size)+1., 'bo')
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, mid.size], 'k:')
for j in range(ntx-i):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
plt.xlabel("X (m)")
plt.ylabel("N-spacing")
plt.xlim(xr.min()-5, xr.max()+5)
plt.ylim(nmax*dx/2+dx, -2*dx)
plt.show()
def DipoleDipolefun(i):
"""
Plotting function to display all receivers and pseudolocations
of a dipole-dipole survey for each source i
:param int i: source index
"""
matplotlib.rcParams['font.size'] = 14
plt.figure(figsize=(10, 3))
nmax = 8
xr = np.linspace(-40, 40, 20)
ntx = xr.size-2
dxr = np.diff(xr)
plt.plot(xr[:-1]+dxr*0.5, np.zeros_like(xr[:-1]), 'ko')
plt.plot(xr[i]+dxr[i]*0.5, np.zeros(1), 'ro')
# for i in range(ntx):
if i < ntx-nmax+1:
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:i+1+nmax]+dxr[i+1:i+1+nmax]*0.5
mid = (txmid+rxmid)*0.5
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(mid, np.arange(nmax)+1., 'bo')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, nmax], 'k:')
for j in range(nmax):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
else:
txmid = xr[i]+dxr[i]*0.5
rxmid = xr[i+1:ntx+1]+dxr[i+1:ntx+1]*0.5
mid = (txmid+rxmid)*0.5
plt.plot((txmid+rxmid)*0.5, np.arange(mid.size)+1., 'bo')
plt.plot(rxmid, np.zeros(rxmid.size), 'go')
plt.plot(np.r_[txmid, mid[-1]], np.r_[0, mid.size], 'k:')
for j in range(ntx-i):
plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j+1], 'k:')
plt.xlabel("X (m)")
plt.ylabel("N-spacing")
plt.xlim(xr.min(), xr.max())
plt.ylim(nmax+1, -1)
plt.show()
def PseudoSectionWidget(survey, flag):
"""
        Widget to visualize the pseudolocations
        associated with a particular survey
        for each source-receiver pair
:param SimPEG.survey survey: Survey object
:param str flag: Survey Type 'PoleDipole', 'DipoleDipole', 'DipolePole'
"""
dx = 5
xr = np.arange(-40, 41, dx)
if flag == "PoleDipole":
ntx, nmax = xr.size-2, 8
dxr = np.diff(xr)
elif flag == "DipolePole":
ntx, nmax = xr.size-1, 7
dxr = xr
elif flag == "DipoleDipole":
ntx, nmax = xr.size-3, 8
dxr = np.diff(xr)
elif flag == "PolePole":
ntx, nmax = xr.size-2, 8
dxr = xr
xzlocs = getPseudoLocs(dxr, ntx, nmax, flag)
PseudoSectionPlot = lambda i, j: PseudoSectionPlotfnc(i, j, survey, flag)
return widgetify(
PseudoSectionPlot,
i=IntSlider(min=0, max=ntx-1, step=1, value=0),
j=IntSlider(min=0, max=nmax-1, step=1, value=0))
def MidpointPseudoSectionWidget():
"""
Widget function to display receivers and pseudolocations
of a dipole-dipole survey for each source i
:param int i: source index
"""
ntx = 18
return widgetify(
DipoleDipolefun,
i=IntSlider(min=0, max=ntx-1, step=1, value=0))
def DC2Dfwdfun(mesh, survey, mapping, xr, xzlocs, rhohalf, rhoblk, xc, yc, r,
dobs, uncert, predmis, nmax=8, plotFlag=None):
"""
Function to display the pseudosection obtained through a survey
over a known geological model
:param TensorMesh mesh: discretization of the model
:param SimPEG.Survey survey: survey object
:param SimPEG.SigmaMap mapping: sigmamap of the model
        :param numpy.array xr: electrode positions
:param numpy.array xzlocs: pseudolocations
:param float rhohalf: Resistivity of the half-space
:param float rhoblk: Resistivity of the cylinder
:param float xc: horizontal center of the cylinder
        :param float yc: vertical center of the cylinder
:param float r: radius of the cylinder
:param numpy.array dobs: observed data
        :param numpy.array uncert: uncertainties of the data
:param str predmis: Choose between 'mis' to display the data misfit
or 'pred' to display the predicted data
:param int nmax: Maximum number of receivers for each source
:param bool plotFlag: Plot only the predicted data
or also the observed and misfit
"""
matplotlib.rcParams['font.size'] = 14
sighalf, sigblk = 1./rhohalf, 1./rhoblk
siglayer = 1e-3
zh = -5
m0 = np.r_[np.log(sighalf), np.log(sighalf), np.log(sighalf), xc, yc, r, zh]
dini = survey.dpred(m0)
mtrue = np.r_[np.log(sighalf), np.log(siglayer), np.log(sigblk), xc, yc, r, zh]
dpred = survey.dpred(mtrue)
xi, yi = np.meshgrid(np.linspace(xr.min(), xr.max(), 120),
np.linspace(1., nmax, 100))
extent = (xi.min(), xi.max(), yi.min(), yi.max())
# Cheat to compute a geometric factor
# define as G = dV_halfspace / rho_halfspace
appres = dpred/dini/sighalf
appresobs = dobs/dini/sighalf
std = np.std(appres)
pred = griddata(
xzlocs, appres, (xi, yi), method='linear'
)
if plotFlag is not None:
fig = plt.figure(figsize=(12, 6))
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
dat1 = mesh.plotImage(np.log10(1./(mapping*mtrue)), ax=ax1,
clim=(1, 3), grid=True,
gridOpts={'color': 'k', 'alpha': 0.5})
cb1ticks = [1., 2., 3.]
cb1 = plt.colorbar(dat1[0], ax=ax1, ticks=cb1ticks)
cb1.ax.set_yticklabels(['{:.0f}'.format(10.**x) for x in cb1ticks])
cb1.set_label("Resistivity (ohm-m)")
ax1.set_ylim(-20, 1.)
ax1.set_xlim(-40, 40)
ax1.set_xlabel("")
ax1.set_ylabel("Depth (m)")
ax1.set_aspect('equal')
ax1.plot(xr, np.zeros_like(xr), 'ko')
if std < 1.:
dat2 = ax2.pcolormesh(xi, yi, pred)
else:
dat2 = ax2.contourf(xi, yi, pred, 10)
ax2.contour(xi, yi, pred, 10, colors='k', alpha=0.5)
ax2.plot(xzlocs[:, 0], xzlocs[:, 1], 'k.', ms=3)
cb2 = plt.colorbar(
dat2, ax=ax2, ticks=np.linspace(appres.min(), appres.max(), 3),
format="%.0f"
)
cb2.set_label("Apparent Resistivity \n (ohm-m)")
ax2.text(-38, 7, "Predicted")
ax2.set_ylim(nmax+1, 0.)
ax2.set_ylabel("N-spacing")
ax2.set_xlabel("Distance (m)")
else:
obs = griddata(xzlocs, appresobs, (xi, yi),
method='linear')
fig = plt.figure(figsize=(12, 9))
ax1 = plt.subplot(311)
dat1 = mesh.plotImage(np.log10(1./(mapping*mtrue)), ax=ax1,
clim=(1, 3), grid=True,
gridOpts={'color': 'k', 'alpha': 0.5})
cb1ticks = [1., 2., 3.]
cb1 = plt.colorbar(dat1[0], ax=ax1, ticks=cb1ticks)
cb1.ax.set_yticklabels(['{:.0f}'.format(10.**x) for x in cb1ticks])
cb1.set_label("Resistivity (ohm-m)")
ax1.set_ylim(-20, 0.)
ax1.set_xlim(-40, 40)
ax1.set_xlabel("")
ax1.set_ylabel("Depth (m)")
ax1.set_aspect('equal')
ax2 = plt.subplot(312)
dat2 = ax2.contourf(xi, yi, obs, 10)
ax2.contour(xi, yi, obs, 10, colors='k', alpha=0.5)
ax2.plot(xzlocs[:, 0], xzlocs[:, 1], 'k.', ms=3)
cb2 = plt.colorbar(dat2, ax=ax2)
cb2.set_label("Apparent Resistivity \n (ohm-m)")
ax2.set_ylim(nmax+1, 0.)
ax2.set_ylabel("N-spacing")
ax2.text(-38, 7, "Observed")
ax3 = plt.subplot(313)
if predmis == "pred":
if std < 1.:
dat3 = ax3.pcolormesh(xi, yi, pred)
else:
dat3 = ax3.contourf(xi, yi, pred, 10)
ax3.contour(xi, yi, pred, 10, colors='k', alpha=0.5)
ax3.plot(xzlocs[:, 0], xzlocs[:, 1], 'k.', ms=3)
cb3 = plt.colorbar(dat3, ax=ax3,
ticks=np.linspace(appres.min(), appres.max(), 5),
format="%4.0f")
cb3.set_label("Apparent Resistivity \n (ohm-m)")
ax3.text(-38, 7, "Predicted")
elif predmis == "mis":
mis = (appresobs-appres)/(appresobs) * 100
Mis = griddata(xzlocs, mis, (xi, yi),
method='linear')
dat3 = ax3.contourf(xi, yi, Mis, 10)
ax3.contour(xi, yi, Mis, 10, colors='k', alpha=0.5)
ax3.plot(xzlocs[:, 0], xzlocs[:, 1], 'k.', ms=3)
cb3 = plt.colorbar(dat3, ax=ax3,
ticks=np.linspace(mis.min(), mis.max(), 5),
format="%4.2f")
cb3.set_label("Normalized misfit (%)")
            ax3.text(-38, 7, "Misfit")
ax3.set_ylim(nmax+1, 0.)
ax3.set_ylabel("N-spacing")
ax3.set_xlabel("Distance (m)")
plt.show()
def DC2DPseudoWidgetWrapper(rhohalf, rhosph, xc, zc, r, surveyType):
dobs, uncert, survey, xzlocs = DC2Dsurvey(surveyType)
DC2Dfwdfun(mesh, survey, mapping, xr, xzlocs, rhohalf, rhosph, xc, zc, r,
dobs, uncert, 'pred', plotFlag='PredOnly')
return None
def DC2DPseudoWidget():
return interactive(
DC2DPseudoWidgetWrapper,
rhohalf=FloatText(
min=10, max=1000, value=1000,
continuous_update=False, description='$\\rho_1$'
),
rhosph=FloatText(
min=10, max=1000, value=1000,
continuous_update=False, description='$\\rho_2$'
),
xc=FloatText(
min=-40, max=40, step=1, value=0,
continuous_update=False
),
zc=FloatText(
min=-20, max=0, step=1, value=-10,
continuous_update=False
),
r=FloatText(
min=0, max=15, step=0.5, value=5,
continuous_update=False
),
surveyType=ToggleButtons(
options=['PolePole', 'PoleDipole', 'DipolePole', 'DipoleDipole'],
value='DipoleDipole'
)
)
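# Note: DC2DPseudoWidget() (and DC2DfwdWidget() below) return ipywidgets objects that
# render their interactive controls when displayed in a Jupyter notebook cell.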
def DC2DfwdWrapper(rhohalf, rhosph, xc, zc, r, predmis, surveyType):
dobs, uncert, survey, xzlocs = DC2Dsurvey(surveyType)
DC2Dfwdfun(mesh, survey, mapping, xr, xzlocs, rhohalf, rhosph,
xc, zc, r, dobs, uncert, predmis)
return None
def DC2DfwdWidget():
return widgetify(
DC2DfwdWrapper, manual = False,
rhohalf=FloatText(min=10, max=1000, value=1000,
continuous_update=False,
description='$\\rho_1$'),
rhosph=FloatText(min=10, max=1000, value=1000,
continuous_update=False,
description='$\\rho_2$'),
xc=FloatSlider(min=-40, max=40, step=1, value=0,
continuous_update=False),
zc=FloatSlider(min=-20, max=0, step=1, value=-10,
continuous_update=False),
r=FloatSlider(min=0, max=15, step=0.5, value=5,
continuous_update=False),
predmis=ToggleButtons(options=['pred', 'mis']),
surveyType=ToggleButtons(options=['PolePole', 'PoleDipole',
'DipolePole', 'DipoleDipole'])
)
| mit |
dsullivan7/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
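# oob_improvement_[i] is the improvement in loss on the out-of-bag samples at stage i,
# so its negative cumulative sum gives an OOB loss curve comparable to the test curve.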
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
jpautom/scikit-learn | benchmarks/bench_plot_nmf.py | 90 | 5742 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
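# Usage sketch (shapes are illustrative):
#   V = np.abs(np.random.RandomState(0).randn(100, 50))
#   W, H = alt_nnmf(V, r=10)  # W: (100, 10), H: (10, 50), with V approximately W.dot(H)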
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization'
'benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
jdavidrcamacho/Tests_GP | MSc_results/results_ess.py | 2 | 29810 | # -*- coding: utf-8 -*-
import Gedi as gedi
import emcee
import sys
import numpy as np
import matplotlib.pylab as pl; pl.close("all")
import astropy.table as Table
import cPickle as pickle
import corner
### Spots dataset to analyse
ijk= 10
### Defining what's supose to run: 1 runs 0 doesn't
day_1, daydecay_1, daydecaygap_1 = 1, 1, 1 #1 measurement a day
day_4, daydecay_4, daydecaygap_4 = 1, 1, 1 #1 measurement every 4 days
#### Preparing MCMC
burns, runs = 100, 100
###############################################################################
### RV function
rvx,rvy=gedi.RV_function.RV_circular(P=30,K=16.343,T=0,gamma=0,time=100,space=100)
pl.plot(rvx,rvy,'*')
pl.savefig('rv_signal.png')
pl.close('all')
### Priors
def lnprob(p):
global kernel
    #p[0]=theta; p[1]=lengthscale; p[2]=period; p[3]=whitenoise
if any([p[0] < -6, p[0] > 6,
p[1] < -10, p[1] > np.log(10),
p[2] < np.log(10), p[2] > np.log(50),
p[3] < -10, p[3] > np.log(10)]):
return -np.inf
lnprior=0.0
# Update the kernel and compute the lnlikelihood.
kernel=gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
return lnprior + new_likelihood
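# lnprob works in log-parameter space: the hard bounds above act as a flat prior and the
# GP likelihood is evaluated after rebuilding the kernel from np.exp(p).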
from scipy import stats
amplitude_prior=stats.uniform(np.exp(-6), np.exp(6)-np.exp(-6))
lenghtscale_prior=stats.uniform(np.exp(-10), 10-np.exp(-10))
period_prior=stats.uniform(10, 50-10)
wn_prior=stats.uniform(np.exp(-10), 10-np.exp(-10))
def from_prior():
return np.array([amplitude_prior.rvs(),lenghtscale_prior.rvs(),
period_prior.rvs(),wn_prior.rvs()])
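# note: from_prior() draws in linear space; the emcee walkers below are initialised with
# np.log of these draws, matching the log-parametrisation used in lnprob.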
### SOAP file to use
soap_file= 'output_spots{0}'.format(ijk)
###############################################################################
from time import time
from matplotlib.ticker import MaxNLocator
if day_1 == 1:
f= open("{0}_1day.txt".format(soap_file),"w")
sys.stdout= f
start= time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr+rvy)
t= np.array(range(1,101))
pl.plot(t,y,'*')
pl.savefig('{0}_RV_1day.png'.format(soap_file))
pl.close('all')
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
    samples[:, 1] = np.exp(samples[:, 1]) #length scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if daydecay_1==1:
f=open("{0}_1day_decay.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
    y0= np.array(spots_info)
    decay=np.linspace(1,0.5,len(y0))
y=[n*m for n,m in zip(y0,decay)]
y= np.array(y+yerr+rvy)
t= np.array(range(1,101))
pl.plot(t,y,'*')
pl.savefig('{0}_RV_1day_decay.png'.format(soap_file))
pl.close('all')
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_decay.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_decay_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_decay_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_decay_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_decay_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
    samples[:, 1] = np.exp(samples[:, 1]) #length scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day_decay.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if daydecaygap_1==1:
f=open("{0}_1day_decaygap.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info)
decay=np.linspace(1,0.5,len(y0))
y0=[n*m for n,m in zip(y0,decay)]
#new t and y
t1= range(1,30)
t2= range(60,101)
t=np.array(t1+t2)
y=[]
yerr1=[]
new_rv=[]
for i,e in enumerate(t):
y.append(y0[e-1])
yerr1.append(yerr[e-1])
new_rv.append(rvy[e-1])
yerr=np.array(yerr1)
y=[n+m+o for n,m,o in zip(y,yerr1,new_rv)]
y=np.array(y)
pl.plot(t,y,'*')
pl.savefig('{0}_RV_1day_dgap.png'.format(soap_file))
pl.close('all')
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_decaygap.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_dgap_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_dgap_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_dgap_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_dgap_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
    samples[:, 1] = np.exp(samples[:, 1]) #length scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day_decaygap.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if day_4==1:
f=open("{0}_4days.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per 4 days
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr0= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info)
t0= np.array(range(1,101))
y=[]
yerr=[]
t=[]
new_rv=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
new_rv.append(rvy[ii])
t.append(t0[ii])
y=[n+m+o for n,m,o in zip(y,yerr,new_rv)]
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
pl.plot(t,y,'*')
pl.savefig('{0}_RV_4days.png'.format(soap_file))
pl.close('all')
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_4days_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_4days_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_4days_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_4days_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
    samples[:, 1] = np.exp(samples[:, 1]) #length scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if daydecay_4==1:
f=open("{0}_4days_decay.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per 4 days
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr0= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info)
decay=np.linspace(1,0.5,len(y))
y=[n*m for n,m in zip(y,decay)]
y0=np.array(y)
t0=np.array(range(1,101))
y=[]
yerr=[]
t=[]
new_rv=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
t.append(t0[ii])
new_rv.append(rvy[ii])
y=[n+m+o for n,m,o in zip(y,yerr,new_rv)]
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
pl.plot(t,y,'*')
pl.savefig('{0}_RV_4days_decay.png'.format(soap_file))
pl.close('all')
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_decay.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_4days_decay_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_4days_decay_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_4days_decay_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_4days_decay_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
    samples[:, 1] = np.exp(samples[:, 1]) #length scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days_decay.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if daydecaygap_4==1:
f=open("{0}_4days_decaygap.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
    #to organize the data into a measurement per 4 days
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info)
decay=np.linspace(1,0.5,len(y0))
y0=[n*m for n,m in zip(y0,decay)]
#new t and y
t1= range(1,30)
t2= range(60,101)
t=np.array(t1+t2)
y=[]
yerr1=[]
old_rv=[]
for i,e in enumerate(t):
y.append(y0[e-1])
yerr1.append(yerr[e-1])
old_rv.append(rvy[e-1])
yerr0=np.array(yerr1)
y0=np.array(y)
t0=t
y=[]
yerr=[]
t=[]
new_rv=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
t.append(t0[ii])
new_rv.append(old_rv[ii])
y=[n+m+o for n,m,o in zip(y,yerr,new_rv)]
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
pl.plot(t,y,'*')
pl.savefig('{0}_RV_4days_dgap.png'.format(soap_file))
pl.close('all')
print '> Preparing kernel.'
print
kernel=gedi.kernel.ExpSineSquared(amplitude_prior.rvs(),
lenghtscale_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4) #log
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$P$")
axes[3].plot(sampler.chain[:, :, 3].T, color="k", alpha=0.4) #log
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$WN$")
axes[3].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_decaygap.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_4days_dgap_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_4days_dgap_L.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_4days_dgap_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_4days_dgap_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
    samples[:, 1] = np.exp(samples[:, 1]) #length scale
samples[:, 2] = np.exp(samples[:, 2]) #period
samples[:, 3] = np.exp(samples[:, 3]) #white noise
theta_mcmc,l_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days_decaygap.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close() | mit |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_mne_inverse_label_connectivity.py | 12 | 5903 | """
=========================================================================
Compute source space connectivity and visualize it using a circular graph
=========================================================================
This example computes the all-to-all connectivity between 68 regions in
source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions.
"""
# Authors: Martin Luessi <[email protected]>
# Alexandre Gramfort <[email protected]>
# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute the inverse solution for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot('sample', parc='aparc',
subjects_dir=subjects_dir)
label_colors = [label.color for label in labels]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
return_generator=True)
# Now we are ready to compute the connectivity in the alpha band. Notice
# from the status messages, how mne-python: 1) reads an epoch from the raw
# file, 2) applies SSP and baseline correction, 3) computes the inverse to
# obtain a source estimate, 4) averages the source estimate to obtain a
# time series for each label, 5) includes the label time series in the
# connectivity computation, and then moves to the next epoch. This
# behaviour is possible because we are using generators, and it allows us to
# compute connectivity in a computationally efficient manner, where the amount
# of memory (RAM) needed is independent of the number of epochs.
fmin = 8.
fmax = 13.
sfreq = raw.info['sfreq'] # the sampling frequency
con_methods = ['pli', 'wpli2_debiased']
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# con is a 3D array, get the connectivity for the first (and only) freq. band
# for each method
con_res = dict()
for method, c in zip(con_methods, con):
con_res[method] = c[:, :, 0]
# Now, we visualize the connectivity using a circular graph layout
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
# Get the y-location of the label
label_ypos = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) / 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
plot_connectivity_circle(con_res['pli'], label_names, n_lines=300,
node_angles=node_angles, node_colors=label_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)')
plt.savefig('circle.png', facecolor='black')
# Plot connectivity for both methods in the same plot
fig = plt.figure(num=None, figsize=(8, 4), facecolor='black')
no_names = [''] * len(label_names)
for ii, method in enumerate(con_methods):
plot_connectivity_circle(con_res[method], no_names, n_lines=300,
node_angles=node_angles, node_colors=label_colors,
title=method, padding=0, fontsize_colorbar=6,
fig=fig, subplot=(1, 2, ii + 1))
plt.show()
| bsd-3-clause |
MTgeophysics/mtpy | mtpy/gui/SmartMT/Components/PlotParameter/frequency_selection.py | 1 | 21542 | # -*- coding: utf-8 -*-
"""
Description:
Usage:
Author: YingzhiGou
Date: 24/10/2017
"""
import numpy as np
from qtpy import QtCore
from qtpy.QtCore import Signal
from qtpy.QtGui import QStandardItemModel, QStandardItem
from qtpy.QtWidgets import QGroupBox, QStyledItemDelegate, QTableWidgetItem
from mtpy.gui.SmartMT.gui.matplotlib_imabedding import MPLCanvas, Cursor
from mtpy.gui.SmartMT.ui_asset.groupbox_frequency_select import Ui_GroupBox_frequency_select
from mtpy.gui.SmartMT.ui_asset.groupbox_select_periods_from_files import Ui_GroupBox_select_from_files
from mtpy.utils.matplotlib_utils import gen_hist_bins
class FrequencySelection(QGroupBox):
"""
    Group box widget for selecting frequencies/periods from the loaded MT objects,
    either individually or as ranges.
"""
def __init__(self, parent, show_period=True, show_frequency=True, allow_range_select=True,
select_multiple=True):
QGroupBox.__init__(self, parent)
self._mt_objs = None
self._unique_periods = None
self._unique_frequencies = None
self._periods = None
self._frequencies = None
self._allow_range = allow_range_select
self._select_multiple = select_multiple
self.ui = Ui_GroupBox_frequency_select()
self.ui.setupUi(self)
self.ui.label_place_holder.hide()
self.model_selected = QStandardItemModel()
self.ui.listView_selected.setModel(self.model_selected)
self.frequency_delegate = FrequencySelection.FrequencyDelegate(self.ui.listView_selected)
self.ui.listView_selected.setItemDelegate(self.frequency_delegate)
self.histogram = FrequencySelection.Histogram(self, allow_range_select=self._allow_range)
self.histogram.set_unit(self._units[0])
self.histogram.set_tol(self.ui.doubleSpinBox_tolerance.value())
self.histogram.frequency_selected.connect(self._frequency_selected)
self.histogram.frequency_range_selected.connect(self._frequency_selected)
self.ui.widget_histgram.layout().addWidget(self.histogram)
self.ui.radioButton_period.setChecked(show_period)
self.ui.radioButton_frequency.setChecked(show_frequency)
self.ui.doubleSpinBox_tolerance.setHidden(not self._allow_range)
self.ui.checkBox_existing_only.setChecked(not self._allow_range)
self.ui.checkBox_existing_only.setHidden(not self._allow_range)
self.ui.label_tolerance.setHidden(not self._allow_range)
self.ui.radioButton_period.setHidden(not (show_period and show_frequency))
self.ui.radioButton_frequency.setHidden(not (show_period and show_frequency))
if self.ui.radioButton_frequency.isHidden():
self.setTitle(self._type[1])
elif self.ui.radioButton_period.isHidden():
self.setTitle(self._type[0])
self.ui.radioButton_frequency.toggled.connect(self._frequency_toggled)
self.ui.checkBox_existing_only.toggled.connect(self.histogram.select_existing)
self.ui.checkBox_existing_only.toggled.connect(self.model_selected.clear)
self.ui.checkBox_show_existing.toggled.connect(self.histogram.show_existing)
self.ui.checkBox_x_log_scale.toggled.connect(self.histogram.set_x_log_scale)
self.ui.checkBox_y_log_scale.toggled.connect(self.histogram.set_y_log_scale)
self.ui.pushButton_clear.clicked.connect(self._clear_all)
self.ui.pushButton_delete.clicked.connect(self._delete_selected)
self.ui.doubleSpinBox_tolerance.valueChanged.connect(self.histogram.set_tol)
def set_data(self, mt_objs):
self._mt_objs = mt_objs
self._unique_frequencies = None
self._unique_periods = None
self._update_frequency()
def get_frequencies(self):
frequencies = [self.model_selected.item(index).data(QtCore.Qt.DisplayRole)
for index in range(self.model_selected.rowCount())]
if self._allow_range:
frequencies = [(freq[0], freq[1]) if isinstance(freq, tuple) else freq
for freq in frequencies]
else:
frequencies = [freq[3] if isinstance(freq, tuple) else freq
for freq in frequencies
if (isinstance(freq, tuple) and len(freq) == 5)
or isinstance(freq, float)]
# print frequencies
if self._select_multiple:
return frequencies
else:
return frequencies[0] if frequencies else self._unique_frequencies[0] # if nothing selected, return minimal frequency
_units = ['Hz', 's']
_type = ['Frequency', 'Period']
def _clear_all(self):
self.model_selected.clear()
self.histogram.clear_all_drawing()
def _delete_selected(self):
for item in [self.model_selected.item(index.row())
for index in self.ui.listView_selected.selectedIndexes()]:
x = item.data(QtCore.Qt.DisplayRole)
self.model_selected.removeRow(self.model_selected.indexFromItem(item).row())
self.histogram.remove_marker(x)
def _frequency_selected(self, x):
if not self._select_multiple:
self.histogram.clear_all_drawing()
self.model_selected.clear()
for item in [self.model_selected.item(index) for index in range(self.model_selected.rowCount())]:
value = item.data(QtCore.Qt.DisplayRole)
if value == x:
return
elif isinstance(value, tuple) and isinstance(x, float) and value[0] <= x <= value[1]:
return # x already in interval
elif isinstance(x, tuple) and isinstance(value, float) and x[0] <= value <= x[1]:
# existing value in new interval
self.model_selected.removeRow(self.model_selected.indexFromItem(item).row())
self.histogram.remove_marker(value)
elif isinstance(x, tuple) and isinstance(value, tuple):
if min(x[1], value[1]) - max(x[0], value[0]) >= 0:
                    # there is an intersection between the intervals, so merge them
mi = min(x[0], value[0])
ma = max(x[1], value[1])
uniques = self._unique_frequencies \
if self.ui.radioButton_frequency.isChecked() \
else self._unique_periods
num = len(
[freq for freq in uniques if mi <= freq <= ma]) # num of existing freqs in the new interval
x = (mi, ma, num)
# remove old interval
self.model_selected.removeRow(self.model_selected.indexFromItem(item).row())
self.histogram.remove_marker(value)
else:
prec = self.frequency_delegate.prec
while np.all(np.isclose(value, x, pow(.1, prec))):
prec += 1
self.frequency_delegate.prec = prec
new_item = FrequencySelection.FrequencyItem()
new_item.setData(x, QtCore.Qt.DisplayRole)
# update graphic
if isinstance(x, float):
self.histogram.add_marker(x)
# new_item.setData(x, QtCore.Qt.UserRole)
elif isinstance(x, tuple):
self.histogram.add_marker(x)
# new_item.setData(x[0], QtCore.Qt.UserRole)
# update model
self.model_selected.appendRow(new_item)
self.model_selected.sort(0)
def show_period(self):
self.ui.radioButton_period.setChecked(True)
def show_frequency(self):
self.ui.radioButton_frequency.setChecked(True)
def _frequency_toggled(self, is_checked):
self.histogram.set_unit(self._units[0] if is_checked else self._units[1])
self._update_frequency()
def _update_frequency(self):
self.model_selected.clear()
if self._mt_objs is not None:
if self._unique_frequencies is None:
self._frequencies = [freq for mt_obj in self._mt_objs for freq in list(mt_obj.Z.freq)]
all_unique = set(self._frequencies)
self._unique_frequencies = sorted(list(all_unique))
if self.ui.radioButton_period.isChecked() and self._unique_periods is None:
self._periods = 1. / np.array(self._frequencies)
all_unique = set(self._periods)
self._unique_periods = sorted(list(all_unique))
self.histogram.set_data(
self._periods if self.ui.radioButton_period.isChecked()
else self._frequencies,
self._unique_periods if self.ui.radioButton_period.isChecked()
else self._unique_frequencies
)
self.frequency_delegate.freqs = self._unique_periods \
if self.ui.radioButton_period.isChecked() \
else self._unique_frequencies
self.histogram.update_figure()
class FrequencyItem(QStandardItem):
def __lt__(self, other):
value = self.data(QtCore.Qt.DisplayRole)
other_value = other.data(QtCore.Qt.DisplayRole)
if isinstance(value, tuple):
value = value[0]
if isinstance(other_value, tuple):
other_value = other_value[0]
return value < other_value
class FrequencyDelegate(QStyledItemDelegate):
_prec = 5 # decimal places
def get_prec(self):
return self._prec
def set_prec(self, prec):
self._prec = prec
prec = property(get_prec, set_prec)
def displayText(self, value, locale):
if isinstance(value, float):
return '{:.{prec}f}'.format(value, prec=self._prec)
elif isinstance(value, tuple) and len(value) == 3: # (min, max, num)
return '{}{}, {}{} ({num} selected)'.format(
'(' if value[0] == -np.inf else '[',
'{:.{prec}f}'.format(value[0], prec=self._prec),
'{:.{prec}f}'.format(value[1], prec=self._prec),
')' if value[1] == np.inf else ']',
num=value[2]
)
elif len(value) == 5: # (min, max, num, freq, tol)
return '{:.{prec}f} ±{tol}% ({num} selected)'.format(
value[3], prec=self._prec, tol=value[4], num=value[2])
# elif isinstance(py_obj, set):
# return '{{}}'.format(','.join(['{:.{prec}f}'.format(f, prec=self._prec) for f in py_obj if isinstance(f, float)]))
return value
class Histogram(MPLCanvas):
def __init__(self, parent, y_log_scale=False, x_log_scale=False, allow_range_select=True):
self._frequencies = None
self._unique_frequencies = None
self._title = None
self._unit = None
self._press = None
self._tol = None
MPLCanvas.__init__(self, parent, 5, 1.5)
self._lx = {}
self._cursor = None
self._select_existing_only = False
self._show_existing = False
self._x_log_scale = x_log_scale
self._y_log_scale = y_log_scale
self._select_range = allow_range_select
if self._select_range:
self.mpl_connect('button_press_event', self.on_press)
self.mpl_connect('button_release_event', self.on_release)
def add_marker(self, x):
if isinstance(x, float):
lx = self._lx.setdefault(x, self._draw_v_line(x))
# self._axes.draw_artist(lx)
self.draw_idle()
elif isinstance(x, tuple):
if len(x) == 3:
lx = self._lx.setdefault(x, self._fill_v_area(x[0], x[1]))
elif len(x) == 5:
lx = self._lx.setdefault(x, (
self._draw_v_line(x[3]),
self._fill_v_area(x[0], x[1])
))
else:
                    raise NotImplementedError
self.draw_idle()
def remove_marker(self, x):
if x in self._lx:
marker = self._lx[x]
if isinstance(marker, tuple):
for m in marker:
m.remove()
else:
marker.remove()
self.draw_idle()
del self._lx[x]
def clear_all_drawing(self):
for key in list(self._lx.keys()):
marker = self._lx[key]
if isinstance(marker, tuple):
for m in marker:
m.remove()
else:
marker.remove()
self._lx.clear()
self.draw_idle()
def set_unit(self, unit):
if unit != self._unit:
self._unit = unit
self._cursor = Cursor(self._axes,
track_y=False,
show_drag=self._select_range,
text_format="%f" + self._unit,
useblit=True)
def select_existing(self, select_existing):
self._select_existing_only = select_existing
self.clear_all_drawing()
def set_tol(self, tol):
self._tol = tol
def show_existing(self, show_existing):
self._show_existing = show_existing
self.update_figure()
def set_data(self, frequencies, unique_frequencies=None):
self._frequencies = frequencies
if unique_frequencies is not None:
self._unique_frequencies = unique_frequencies
else:
self._unique_frequencies = sorted(list(set(frequencies)))
self._lx.clear()
def set_y_log_scale(self, ischecked):
self._y_log_scale = ischecked
self.update_figure()
def set_x_log_scale(self, isChecked):
self._x_log_scale = isChecked
self.update_figure()
frequency_selected = Signal(float)
frequency_range_selected = Signal(tuple)
def _get_valid_cursor_loc(self, event):
if not event.inaxes:
pos = self._axes.get_position()
if self.height() * pos.y0 < event.y < self.height() * pos.y1:
x = -np.inf if event.x < self.width() * pos.x0 else np.inf
else:
x = None
else:
x = event.xdata
return x
def on_press(self, event):
self._press = self._get_valid_cursor_loc(event)
def on_release(self, event):
x = self._get_valid_cursor_loc(event)
if x:
if self._press and self._press != x: # emit (min, max, num)
if self._press < x:
self.frequency_range_selected.emit(
(
self._press,
x,
len([freq for freq in self._unique_frequencies
if self._press <= freq <= x])
)
)
elif self._press > x:
self.frequency_range_selected.emit(
(
x,
self._press,
len([freq for freq in self._unique_frequencies
if x <= freq <= self._press])
)
)
elif not self._select_range or self._select_existing_only:
x = self._find_closest(x)
self.frequency_selected.emit(x)
else: # emit (min, max, num, freq, tol)
tol = x * self._tol / 100.
min = x - tol
max = x + tol
self.frequency_range_selected.emit(
(
min,
max,
len([freq for freq in self._unique_frequencies
if min <= freq <= max]),
x,
self._tol
)
)
self._press = None
def _find_closest(self, x):
return min(self._frequencies, key=lambda freq: abs(freq - x))
def compute_initial_figure(self):
self._axes.tick_params(axis='both', which='major', labelsize=6)
self._axes.tick_params(axis='both', which='minor', labelsize=4)
if self._frequencies is not None:
bins = gen_hist_bins(self._unique_frequencies)
self._axes.hist(self._frequencies, bins=bins) # , 50, normed=1)
if self._y_log_scale:
self._axes.set_yscale('log', nonposy='clip')
if self._x_log_scale:
self._axes.set_xscale('log', nonposx='clip')
if self._show_existing:
for freq in self._unique_frequencies:
self._axes.axvline(freq, linewidth=1, color='black', alpha=0.2)
if self._title and self._unit:
self._axes.set_xlabel("%s (%s)" % (self._title, self._unit), fontsize=8)
self.figure.suptitle('%s Distribution in Selected Stations' %
self._title, fontsize=8)
self._fig.set_tight_layout(True)
def update_figure(self):
self._axes.cla()
self.compute_initial_figure()
for key in list(self._lx.keys()):
if isinstance(key, float):
self._lx[key] = self._draw_v_line(key)
elif isinstance(key, tuple):
if len(key) == 3:
self._lx[key] = self._fill_v_area(key[0], key[1])
elif len(key) == 5:
self._lx[key] = (self._draw_v_line(key[3]), self._fill_v_area(key[0], key[1]))
self.draw()
def _draw_v_line(self, x):
if x == -np.inf:
x = self._axes.get_xlim()[0]
if x == np.inf:
x = self._axes.get_xlim()[1]
return self._axes.axvline(x=x, linewidth=1, color="red")
def _fill_v_area(self, x1, x2):
if x1 == -np.inf:
x1 = self._axes.get_xlim()[0]
if x2 == np.inf:
x2 = self._axes.get_xlim()[1]
return self._axes.axvspan(x1, x2, alpha=0.5, color='red')
class FrequencySelectionFromFile(QGroupBox):
"""
select frequencies/periods from the selected edi files
"""
def __init__(self, parent):
QGroupBox.__init__(self, parent)
self._mt_obj_dict = {}
self.model_stations = QStandardItemModel()
# setup ui
self.ui = Ui_GroupBox_select_from_files()
self.ui.setupUi(self)
self.ui.listView_stations.setModel(self.model_stations)
# connect signals
self.ui.listView_stations.selectionModel().selectionChanged.connect(self._update_selection)
data_changed = Signal()
def set_data(self, mt_objs):
self._mt_obj_dict.clear()
for mt_obj in mt_objs:
self._mt_obj_dict[mt_obj.station] = mt_obj
self._update_stations()
self.data_changed.emit()
def _update_stations(self):
self.model_stations.clear()
for mt_obj in list(self._mt_obj_dict.values()):
new_item = QStandardItem()
new_item.setData(mt_obj.station, QtCore.Qt.DisplayRole)
new_item.setData(mt_obj.fn, QtCore.Qt.ToolTipRole)
self.model_stations.appendRow(new_item)
self.model_stations.sort(0)
def _update_selection(self):
self.ui.tableWidget_selected.clearContents()
unique_frequencies = set()
# combine frequencies from all selected stations
for index in self.ui.listView_stations.selectedIndexes():
item = self.model_stations.item(index.row())
station = item.data(QtCore.Qt.DisplayRole)
mt_obj = self._mt_obj_dict[station]
# get frequencies
freq = [freq for freq in list(mt_obj.Z.freq)]
unique_frequencies.update(freq)
# order !
unique_frequencies = sorted(list(unique_frequencies))
unique_periods = list(1. / np.array(unique_frequencies))
# update widget
self.ui.tableWidget_selected.setRowCount(len(unique_frequencies))
for index, freq_period in enumerate(zip(unique_frequencies, unique_periods)):
for i in [0, 1]:
newItem = QTableWidgetItem(str(freq_period[i]))
newItem.setData(QtCore.Qt.UserRole, freq_period[i])
newItem.setFlags(QtCore.Qt.ItemIsEnabled)
self.ui.tableWidget_selected.setItem(index, i, newItem)
def get_selected_frequencies(self):
return self.get_data(0)
def get_selected_periods(self):
return self.get_data(1)
def get_data(self, column_index):
data = [
self.ui.tableWidget_selected.item(index, column_index).data(QtCore.Qt.UserRole)
for index in range(self.ui.tableWidget_selected.rowCount())
]
return data
| gpl-3.0 |
RudrakshTuwani/Twitter-Scripts | Topic-Modelling/TweetTextPreProcessing.py | 1 | 3671 | import pandas as pd
import re
def main():
# Reads files from Analytics Dashboard into df.
loc = "E:/Data Science/Safecity/Twitter/Tweets Dataset/"
TweetActv1 = pd.read_csv(loc + 'April15.csv')
TweetActv2 = pd.read_csv(loc + 'May15.csv')
TweetActv3 = pd.read_csv(loc + 'June15.csv')
TweetActv4 = pd.read_csv(loc + 'July15.csv')
TweetActv5 = pd.read_csv(loc + 'Aug15.csv')
TweetActv6 = pd.read_csv(loc + 'Sep15.csv')
TweetActv7 = pd.read_csv(loc + 'Oct15.csv')
TweetActv8 = pd.read_csv(loc + 'Nov15.csv')
TweetActv9 = pd.read_csv(loc + 'Dec15.csv')
TweetActv10 = pd.read_csv(loc + 'Jan.csv')
TweetActv11 = pd.read_csv(loc + 'Feb.csv')
TweetActv12 = pd.read_csv(loc + 'March.csv')
TweetActv13 = pd.read_csv(loc + 'April.csv')
TweetActv14 = pd.read_csv(loc + 'May.csv')
# Concat Data
TweetActv = pd.concat([TweetActv1,
TweetActv2,
TweetActv3,
TweetActv4,
TweetActv5,
TweetActv6,
TweetActv7,
TweetActv8,
TweetActv9,
TweetActv10,
TweetActv11,
TweetActv12,
TweetActv13,
TweetActv14])
#Cleaning Data by getting specific columns
col_list = ['Tweet id', 'Tweet text','impressions','engagements','engagement rate']
TweetActv = TweetActv[col_list]
TweetActv = PreProcess(TweetActv)
TweetActv.to_csv("E:/Data Science/Safecity/Twitter/Tweets PreProcessed/TweetsforWC.csv",
sep='\t', encoding='utf-8')
def PreProcess(TweetActv):
# Identify which tweets are replies.
def is_reply(s):
if s[0] == '@':
return 1
else:
return 0
TweetActv['IsReply'] = TweetActv['Tweet text'].apply(is_reply)
# Extract or Remove Tagged People
def remove_tagged(s):
tagged = list(part for part in s.split() if (part.startswith('@')))
words = list(s.split())
final_words = [word for word in words if (word not in tagged)]
if len(final_words) == 0:
return ''
else:
a = ''
for word in final_words:
a = a + word + ' '
return a
def extract_tagged(s):
tagged = list(part for part in s.split() if (part.startswith('@')))
if len(tagged) == 0:
return ''
else:
a = ''
for tag in tagged:
a = a + tag + ' '
return a
# Extract links or remove links.
def extract_links(s):
links = re.findall(r'(https?://\S+)', s)
a = ''
for link in links:
a = a + link + ' '
return a
def remove_links(s):
links = re.findall(r'(https?://\S+)', s)
words = list(s.split())
final_words = [word for word in words if (word not in links)]
a = ''
for word in final_words:
a = a + word + ' '
return a
TweetActv['Links'] = TweetActv['Tweet text'].apply(extract_links)
TweetActv['ProcessedText'] = TweetActv['Tweet text'].apply(remove_links)
TweetActv['ProcessedText'] = TweetActv['ProcessedText'].apply(remove_tagged)
TweetActv['Tags'] = TweetActv['Tweet text'].apply(extract_tagged)
return TweetActv
if __name__ == "__main__":
main() | mit |
jjberry/Autotrace | matlab-version/image_diversity_nogui.py | 3 | 12973 | #!/usr/bin/env python
'''
image_diversity.py
Written by Jeff Berry on Dec 21 2010
purpose:
This script measures the distance from average for each image in the
input set, and copies the specified number of highest scoring images
to a new folder called 'diverse'. If ROI_config.txt is present in the
same folder as the input images, the ROI in that file will be used to
do the measurement. If not present, it will use a hard-coded default ROI.
usage:
python image_diversity.py <num_images> <num_testset> <num_batches>
parameters:
<num_images>: The number of images to use in the diverse set. This number
represents the most diverse images. The script will automatically
add the 50 least diverse images to the set.
<num_testset>: The number of images to save out of the diverse set as a
test set. These images will be stored in 'diverse-test'.
<num_batches>: The number of groups to organize the remaining images into
example:
python image_diversity.py 300 100 5
#This command will result in 250 images in 'diverse' and 100 test images
#in 'diverse-test'. The remaining images will be split into 5 groups in
#'batch1', 'batch2', etc.
---------------------------------------------
Modified by Jeff Berry on Feb 18 2011
reason:
Updated the script to use ROI_config.txt. This made the initial ROI selection
window unnecessary. ROI is now selected using SelectROI.py
---------------------------------------------
Modified by Jeff Berry on Feb 25 2011
reason:
added support for unique tracer codes on .traced.txt files
---------------------------------------------
Modified by Jeff Berry on Jan 26 2012
reason:
added support for splitting diverse images into train and test sets. The script
is no longer interactive due to problems with the raw_input() function interacting
with GTK. Instead, the numbers of train and test images are passed to the script
as arguments (see usage above).
'''
import cv
import os, sys
import operator
import subprocess
from numpy import *
import matplotlib.pyplot as plot
import multiprocessing
CopyQueue = multiprocessing.Queue()
FinishQueue = multiprocessing.Queue()
class CopyThread(multiprocessing.Process):
def run(self):
flag = 'ok'
while (flag != 'stop'):
cmd = CopyQueue.get()
if cmd == None:
flag = 'stop'
else:
#print ' '.join(cmd)
p = subprocess.Popen(cmd)
p.wait()
FinishQueue.put(cmd)
#print "CopyThread stopped"
class ImageWindow:
def __init__(self, data_dir, n, n_test, n_batches, add_lower50='y', make_testset='y'):
#self.onOpen()
files = os.listdir(data_dir)
datafiles = []
for i in files:
if i[-3:] == 'jpg':
datafiles.append(os.path.join(data_dir,i))
self.datafiles = datafiles
self.makeDest()
self.get_tracenames()
# get an image and open it to see the size
img = cv.LoadImageM(self.datafiles[0], iscolor=False)
self.csize = shape(img)
self.img = asarray(img)
#open up the ROI_config.txt and parse
self.pathtofiles = '/'.join(self.datafiles[0].split('/')[:-1]) + '/'
self.config = self.pathtofiles + 'ROI_config.txt'
if (os.path.isfile(self.config)):
print "Found ROI_config.txt"
c = open(self.config, 'r').readlines()
self.top = int(c[1][:-1].split('\t')[1])
self.bottom = int(c[2][:-1].split('\t')[1])
self.left = int(c[3][:-1].split('\t')[1])
self.right = int(c[4][:-1].split('\t')[1])
print "using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right)
else:
print "ROI_config.txt not found"
self.top = 140 #default settings for the Sonosite Titan
self.bottom = 320
self.left = 250
self.right = 580
print "using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right)
roi = img[self.top:self.bottom, self.left:self.right]
self.roisize = shape(roi)
self.get_diverse(n, n_test, n_batches, add_lower50, make_testset)
#def onOpen(self):
#fc = gtk.FileChooserDialog(title='Open Image Files', parent=None,
# action=gtk.FILE_CHOOSER_ACTION_OPEN,
# buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
# gtk.STOCK_OPEN, gtk.RESPONSE_OK))
#g_directory = fc.get_current_folder()
#fc.set_current_folder(g_directory)
#fc.set_default_response(gtk.RESPONSE_OK)
#fc.set_select_multiple(True)
#ffilter = gtk.FileFilter()
#ffilter.set_name('Image Files')
#ffilter.add_pattern('*.jpg')
#ffilter.add_pattern('*.png')
#fc.add_filter(ffilter)
#response = fc.run()
#if response == gtk.RESPONSE_OK:
# self.datafiles = fc.get_filenames()
# g_directory = fc.get_current_folder()
#fc.destroy()
def makeDest(self):
s = self.datafiles[0].split('/')
self.rootdir = '/'.join(s[:-1]) + '/'
self.destpath = '/'.join(s[:-1]) + '/diverse/'
print "images will be saved in", self.destpath
if not os.path.isdir(self.destpath):
os.mkdir(self.destpath)
print "created directory", self.destpath
def get_tracenames(self):
'''This method will look for existing trace files and create a dictionary to corresponding
image files. It will only work if all image files are in the same directory
'''
self.tracenames = {}
tracedir = '/'.join(self.datafiles[0].split('/')[:-1])+ '/'
files = os.listdir(tracedir)
traces = []
for i in files:
if ('traced.txt' in i):
traces.append(tracedir+i)
for i in self.datafiles:
for j in traces:
if i in j:
self.tracenames[i] = j
def get_average_image(self):
files = self.datafiles
ave_img = zeros(self.roisize)
for i in range(len(files)):
img = cv.LoadImageM(files[i], iscolor=False)
roi = img[self.top:self.bottom, self.left:self.right]
roi = asarray(roi)/255.
ave_img += roi
ave_img /= len(files)
return ave_img, files
def get_diverse(self, n, n_test, n_batches, add_lower50='y', make_testset='y'):
        '''gets the n most diverse images from the data set and moves them into self.destpath'''
if os.path.isdir(self.destpath):
print "calculating average image"
ave_img, files = self.get_average_image()
print "measuring distances from average"
results = {}
for i in range(len(files)):
img = cv.LoadImageM(files[i], iscolor=False)
roi = img[self.top:self.bottom, self.left:self.right]
roi = asarray(roi)/255.
dif_img = abs(roi - ave_img)
results[files[i]] = sum(sum(dif_img))
sorted_results = sorted(results.iteritems(), key=operator.itemgetter(1), reverse=True)
#show rank vs. energy plot
count = 1
for (i,j) in sorted_results:
plot.plot(count, j, 'b.')
count += 1
plot.savefig(self.destpath+'rankVenergy.png')
#plot.show()
#cmd = ['open', self.destpath+'rankVenergy.png']
#p = subprocess.Popen(cmd)
#n = int(raw_input("Enter number of images to move: "))
#print n # for some reason, these raw_input calls don't work anymore
#add_lower50 = raw_input("Should I also add the 50 least different images? [Y/n]: ")
#make_testset = raw_input("Should I save out some images as a test set? [Y/n]: ")
if (make_testset == '') or (make_testset.lower() == 'y'):
TESTSET = True
#n_test = int(raw_input("Enter the number of test images to save out: "))
self.testdir = self.destpath[:-1]+'-test/'
if not os.path.isdir(self.testdir):
os.mkdir(self.testdir)
else:
TESTSET = False
#n_test = 0
numThreads = 4
for i in range(numThreads):
thread = CopyThread()
thread.start()
filenames = []
for (i,j) in sorted_results[:n]:
filenames.append(i)
if (add_lower50 == '') or (add_lower50.lower() == 'y'):
for (i,j) in sorted_results[-50:]:
filenames.append(i)
filenames = array(filenames)
if TESTSET:
inds = arange(len(filenames))
random.shuffle(inds)
testinds = inds[:n_test]
traininds = inds[n_test:]
trainfiles = filenames[traininds]
testfiles = filenames[testinds]
else:
trainfiles = filenames
count = 0
print "saving most diverse images to:", self.destpath
for i in trainfiles:
fname = i.split('/')[-1]
cmd = ['mv', i, self.destpath+fname]
#print count
count += 1
CopyQueue.put(cmd)
if self.tracenames.has_key(i):
cmd2 = ['mv', self.tracenames[i], self.destpath]
count += 1
CopyQueue.put(cmd2)
if TESTSET:
for i in testfiles:
fname = i.split('/')[-1]
cmd = ['mv', i, self.testdir+fname]
CopyQueue.put(cmd)
#print count
count += 1
if self.tracenames.has_key(i):
cmd2 = ['mv', self.tracenames[i], self.testdir]
count += 1
CopyQueue.put(cmd2)
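            # Images that were neither most nor least diverse are shuffled and split
            # evenly into n_batches folders (batch001, batch002, ...).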
remaining = []
for (i,j) in sorted_results[n:-50]:
remaining.append(i)
remaining = array(remaining)
inds = arange(len(remaining))
random.shuffle(inds)
breaks = linspace(0, len(remaining), n_batches+1).astype(integer)
for i in range(n_batches):
batch_inds = inds[breaks[i]:breaks[i+1]]
batch_files = remaining[batch_inds]
batch_dir = "batch%03d" % (i+1)
dest = os.path.join(self.rootdir, batch_dir)
if not os.path.isdir(dest):
os.mkdir(dest)
for j in batch_files:
fname = j.split('/')[-1]
cmd = ['mv', j, os.path.join(dest, fname)]
count += 1
CopyQueue.put(cmd)
if self.tracenames.has_key(j):
cmd2 = ['mv', self.tracenames[j], dest]
count += 1
CopyQueue.put(cmd2)
# stop the threads
for i in range(numThreads):
CopyQueue.put(None)
# write sorted_results to a .txt file for future reference
# added Mar 10 2011 by Jeff Berry
o = open(self.destpath+'SortedResults.txt', 'w')
for (i,j) in sorted_results:
o.write("%s\t%.4f\n" %(i, j))
o.close()
for i in range(count):
Fcmd = FinishQueue.get()
print ' '.join(Fcmd)
print "done"
roifile = '/'.join(self.datafiles[0].split('/')[:-1]) + '/ROI_config.txt'
if os.path.isfile(roifile):
p = subprocess.Popen(['cp', roifile, self.destpath])
p.wait()
#p = subprocess.Popen(['rm', self.destpath+'/rankVenergy.png'])
#p.wait()
#try:
# gtk.main_quit() #for some reason this is not exiting gracefully
#except RuntimeError:
# #print "press ctrl+c to quit"
# p1 = subprocess.Popen(['ps', '-ef'], stdout=subprocess.PIPE)
# p2 = subprocess.Popen(['grep', '-i', 'image_diversity'], stdin=p1.stdout, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(['awk', "{print $2}"], stdin=p2.stdout, stdout=subprocess.PIPE)
# pid = p3.communicate()[0][:-1]
# print pid
# p = subprocess.Popen(['kill', pid])
if __name__ == "__main__":
ImageWindow(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
#gtk.main()
| mit |
hojonathanho/cgt | thirdparty/tabulate.py | 24 | 29021 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_float_type = float
_text_type = unicode
_binary_type = str
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_float_type = float
_text_type = str
_binary_type = bytes
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.2"
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _latex_line_begin_tabular(colwidths, colaligns):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\\begin{tabular}{" + tabular_columns_fmt + "}\n\hline"
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=DataRow("", "&", "\\\\"),
datarow=DataRow("", "&", "\\\\"),
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile("\x1b\[\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is int or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(s)
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len(_strip_invisible(s))
else:
return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
return _text_type(val, "ascii")
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's a usual an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")): # namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif headers == "keys" and len(rows) > 0: # keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(headers)
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, a two-dimensional NumPy array,
NumPy record array, or a Pandas' dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
and 'latex'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"""
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] +
                           ['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h)+2 for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
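# --- Illustrative sketch (added here for exposition; not part of the original
# module).  It shows how the low-level builders combine; plain tuples stand in
# for the row/line format namedtuples used elsewhere in this module, since they
# are only unpacked positionally here.
def _demo_row_and_line_builders():
    cells = _pad_row(["spam", "41.99"], 1)          # [' spam ', ' 41.99 ']
    row = _build_row(cells, [6, 7], ["left", "right"], ("|", "|", "|"))
    line = _build_line([6, 7], ["left", "right"], ("+", "-", "+", "+"))
    assert row == "| spam | 41.99 |"
    assert line == "+------+-------+"
    return row, line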
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
| mit |
xuewei4d/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 46 | 4325 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared
distances to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum), with estimated centers stuck
between ground-truth clusters.
The dataset used for evaluation is a 2D grid of widely spaced
isotropic Gaussian clusters.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
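# Illustrative aside (added for exposition; not part of the original example):
# the "relative standard deviation of the inertia" mentioned in the module
# docstring can be read off the same `inertia` array gathered above.  Note that
# at this point `inertia` only holds the results of the last evaluated case.
rel_std_inertia = inertia.std(axis=1) / inertia.mean(axis=1)
print("Relative std. dev. of inertia (last case, per n_init): %s"
      % rel_std_inertia)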
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.nipy_spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
Averroes/statsmodels | statsmodels/tsa/statespace/tests/test_sarimax.py | 9 | 63059 | """
Tests for SARIMAX models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import warnings
from statsmodels.tsa.statespace import sarimax, tools
from statsmodels.tsa import arima_model as arima
from .results import results_sarimax
from statsmodels.tools import add_constant
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
coverage_path = 'results' + os.sep + 'results_sarimax_coverage.csv'
coverage_results = pd.read_csv(current_path + os.sep + coverage_path)
class TestSARIMAXStatsmodels(object):
"""
Test ARIMA model using SARIMAX class against statsmodels ARIMA class
"""
def __init__(self):
self.true = results_sarimax.wpi1_stationary
endog = self.true['data']
self.model_a = arima.ARIMA(endog, order=(1, 1, 1))
self.result_a = self.model_a.fit(disp=-1)
self.model_b = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
simple_differencing=True,
hamilton_representation=True)
self.result_b = self.model_b.fit(disp=-1, cov_type='oim')
def test_loglike(self):
assert_allclose(self.result_b.llf, self.result_a.llf)
def test_aic(self):
assert_allclose(self.result_b.aic, self.result_a.aic)
def test_bic(self):
assert_allclose(self.result_b.bic, self.result_a.bic)
def test_hqic(self):
assert_allclose(self.result_b.hqic, self.result_a.hqic)
def test_mle(self):
# ARIMA estimates the mean of the process, whereas SARIMAX estimates
# the intercept. Convert the mean to intercept to compare
params_a = self.result_a.params
params_a[0] = (1 - params_a[1]) * params_a[0]
assert_allclose(self.result_b.params[:-1], params_a, atol=5e-5)
def test_bse(self):
# Make sure the default type is OIM for this example
assert(self.result_b.cov_type == 'oim')
# Test the OIM BSE values
assert_allclose(
self.result_b.bse[1:-1],
self.result_a.bse[1:],
atol=1e-2
)
def test_t_test(self):
import statsmodels.tools._testing as smt
#self.result_b.pvalues
#self.result_b._cache['pvalues'] += 1 # use to trigger failure
smt.check_ttest_tvalues(self.result_b)
smt.check_ftest_pvalues(self.result_b)
class SARIMAXStataTests(object):
def test_loglike(self):
assert_almost_equal(
self.result.llf,
self.true['loglike'], 4
)
def test_aic(self):
assert_almost_equal(
self.result.aic,
self.true['aic'], 3
)
def test_bic(self):
assert_almost_equal(
self.result.bic,
self.true['bic'], 3
)
def test_hqic(self):
hqic = (
-2*self.result.llf +
2*np.log(np.log(self.result.nobs)) *
self.result.params.shape[0]
)
assert_almost_equal(
self.result.hqic,
hqic, 3
)
class ARIMA(SARIMAXStataTests):
"""
ARIMA model
Stata arima documentation, Example 1
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = true['data']
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
*args, **kwargs)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
self.result = self.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
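# --- Illustrative aside (added for exposition; not part of the original test
# suite).  The mean-to-intercept conversion used in ARIMA.__init__ above rests
# on the identity for an AR(p) process with constant c and mean mu:
#     mu = c / (1 - phi_1 - ... - phi_p),  i.e.  c = (1 - sum(phi_i)) * mu.
# A minimal sketch of that algebra:
def _demo_mean_to_intercept(mean, ar_coeffs):
    """Return the intercept implied by a process mean and AR coefficients."""
    return (1 - np.sum(ar_coeffs)) * mean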
class TestARIMAStationary(ARIMA):
def __init__(self):
super(TestARIMAStationary, self).__init__(
results_sarimax.wpi1_stationary
)
def test_bse(self):
# Default covariance type (OPG)
assert_allclose(
self.result.bse[1], self.true['se_ar_opg'],
atol=1e-3,
)
assert_allclose(
self.result.bse[2], self.true['se_ma_opg'],
atol=1e-3,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[1], self.true['se_ar_oim'],
atol=1e-3,
)
assert_allclose(
oim_bse[2], self.true['se_ma_oim'],
atol=1e-2,
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[2], self.true['se_ma_oim'],
atol=1e-2,
)
def test_bse_robust(self):
robust_oim_bse = self.result.cov_params_robust_oim.diagonal()**0.5
robust_cs_bse = self.result.cov_params_robust_cs.diagonal()**0.5
true_robust_bse = np.r_[
self.true['se_ar_robust'], self.true['se_ma_robust']
]
assert_allclose(
robust_oim_bse[1:3], true_robust_bse,
atol=1e-2,
)
assert_allclose(
robust_cs_bse[1:3], true_robust_bse,
atol=1e-1,
)
class TestARIMADiffuse(ARIMA):
def __init__(self, **kwargs):
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = (
results_sarimax.wpi1_diffuse['initial_variance']
)
super(TestARIMADiffuse, self).__init__(results_sarimax.wpi1_diffuse,
**kwargs)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[1], self.true['se_ar_opg'],
atol=1e-1,
)
assert_allclose(
self.result.bse[2], self.true['se_ma_opg'],
atol=1e-1, rtol=1e-1
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[2], self.true['se_ma_oim'],
atol=1e-2, rtol=1e-1
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[2], self.true['se_ma_oim'],
atol=1e-2, rtol=1e-1
)
class AdditiveSeasonal(SARIMAXStataTests):
"""
ARIMA model with additive seasonal effects
Stata arima documentation, Example 2
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, order=(1, 1, (1, 0, 0, 1)), trend='c', *args, **kwargs
)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
self.result = self.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestAdditiveSeasonal(AdditiveSeasonal):
def __init__(self):
super(TestAdditiveSeasonal, self).__init__(
results_sarimax.wpi1_seasonal
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[1], self.true['se_ar_opg'],
atol=1e-3,
)
assert_allclose(
self.result.bse[2:4], self.true['se_ma_opg'],
atol=1e-3,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[2:4], self.true['se_ma_oim'],
atol=1e-1
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[2:4], self.true['se_ma_oim'],
atol=1e-1
)
class Airline(SARIMAXStataTests):
"""
Multiplicative SARIMA model: "Airline" model
Stata arima documentation, Example 3
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, order=(0, 1, 1), seasonal_order=(0, 1, 1, 12),
trend='n', *args, **kwargs
)
params = np.r_[true['params_ma'], true['params_seasonal_ma'],
true['params_variance']]
self.result = self.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-4
)
class TestAirlineHamilton(Airline):
def __init__(self):
super(TestAirlineHamilton, self).__init__(
results_sarimax.air2_stationary
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_ma_opg'],
atol=1e-4,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-3,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
class TestAirlineHarvey(Airline):
def __init__(self):
super(TestAirlineHarvey, self).__init__(
results_sarimax.air2_stationary, hamilton_representation=False
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_ma_opg'],
atol=1e-3,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-3,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
def test_bse_cs(self):
        # CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
class TestAirlineStateDifferencing(Airline):
def __init__(self):
super(TestAirlineStateDifferencing, self).__init__(
results_sarimax.air2_stationary, simple_differencing=False,
hamilton_representation=False
)
def test_bic(self):
        # Due to the diffuse component of the state (which technically changes
        # the BIC calculation - see Durbin and Koopman, section 7.4), this is
        # the best we can do for the BIC
assert_almost_equal(
self.result.bic,
self.true['bic'], 0
)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_ma_opg'],
atol=1e-3,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-4,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
class Friedman(SARIMAXStataTests):
"""
ARMAX model: Friedman quantity theory of money
Stata arima documentation, Example 4
"""
def __init__(self, true, exog=None, *args, **kwargs):
self.true = true
endog = np.r_[true['data']['consump']]
if exog is None:
exog = add_constant(true['data']['m2'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, exog=exog, order=(1, 0, 1), *args, **kwargs
)
params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
self.result = self.model.filter(params)
class TestFriedmanMLERegression(Friedman):
def __init__(self):
super(TestFriedmanMLERegression, self).__init__(
results_sarimax.friedman2_mle
)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-2, rtol=1e-3
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_exog_opg'][0],
rtol=1e-1
)
assert_allclose(
self.result.bse[1], self.true['se_exog_opg'][1],
atol=1e-2,
)
assert_allclose(
self.result.bse[2], self.true['se_ar_opg'],
atol=1e-2,
)
assert_allclose(
self.result.bse[3], self.true['se_ma_opg'],
atol=1e-2,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_exog_oim'][0],
rtol=1e-1
)
assert_allclose(
oim_bse[1], self.true['se_exog_oim'][1],
atol=1e-2,
)
assert_allclose(
oim_bse[2], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[3], self.true['se_ma_oim'],
atol=1e-2,
)
def test_bse_cs(self):
# CS covariance type
        cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_exog_oim'][0],
rtol=1e-1
)
assert_allclose(
cs_bse[1], self.true['se_exog_oim'][1],
atol=1e-2,
)
assert_allclose(
cs_bse[2], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[3], self.true['se_ma_oim'],
atol=1e-2,
)
class TestFriedmanStateRegression(Friedman):
def __init__(self):
# Remove the regression coefficients from the parameters, since they
# will be estimated as part of the state vector
true = dict(results_sarimax.friedman2_mle)
exog = add_constant(true['data']['m2']) / 10.
true['mle_params_exog'] = true['params_exog'][:]
true['mle_se_exog'] = true['se_exog_opg'][:]
true['params_exog'] = []
true['se_exog'] = []
super(TestFriedmanStateRegression, self).__init__(
true, exog=exog, mle_regression=False
)
self.true_params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
self.result = self.model.filter(self.true_params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-1, rtol=1e-1
)
def test_regression_parameters(self):
# The regression effects are integrated into the state vector as
# the last two states (thus the index [-2:]). The filtered
# estimates of the state vector produced by the Kalman filter and
# stored in `filtered_state` for these state elements give the
# recursive least squares estimates of the regression coefficients
# at each time period. To get the estimates conditional on the
# entire dataset, use the filtered states from the last time
# period (thus the index [-1]).
assert_almost_equal(
self.result.filter_results.filtered_state[-2:, -1] / 10.,
self.true['mle_params_exog'], 1
)
    # Loglikelihood (and so aic, bic) is slightly different when the regression
    # coefficients are integrated into the state vector
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_ar_opg'],
atol=1e-2
)
assert_allclose(
self.result.bse[1], self.true['se_ma_opg'],
atol=1e-2
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_ar_oim'],
atol=1e-1,
)
assert_allclose(
oim_bse[1], self.true['se_ma_oim'],
atol=1e-2, rtol=1e-2
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_ar_oim'],
atol=1e-1,
)
assert_allclose(
cs_bse[1], self.true['se_ma_oim'],
atol=1e-2, rtol=1e-2
)
class TestFriedmanPredict(Friedman):
"""
ARMAX model: Friedman quantity theory of money, prediction
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
    This follows the given Stata example, although it is not truly forecasting
    because it compares predictions that use the actual data (which is
    available in the example but just not used in the MLE parameter
    estimation) against dynamic predictions of that data. Here `test_predict`
    matches the first case, and `test_dynamic_predict` matches the second.
"""
def __init__(self):
super(TestFriedmanPredict, self).__init__(
results_sarimax.friedman2_predict
)
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_predict(self):
assert_almost_equal(
self.result.predict()[0],
self.true['predict'], 3
)
def test_dynamic_predict(self):
dynamic = len(self.true['data']['consump'])-15-1
assert_almost_equal(
self.result.predict(dynamic=dynamic)[0],
self.true['dynamic_predict'], 3
)
class TestFriedmanForecast(Friedman):
"""
ARMAX model: Friedman quantity theory of money, forecasts
Variation on:
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This is a variation of the Stata example, in which the endogenous data is
actually made to be missing so that the predict command must forecast.
    As another unit test, we also compare against the case in Stata when
    predict is used against missing data (i.e. forecasting) with the dynamic
    option also included. Note, however, that forecasting in state space models
    amounts to running the Kalman filter against missing datapoints, so it is
    not clear whether "dynamic" forecasting (where instead of missing
    datapoints for lags, we plug in previously forecasted endog values) is
    meaningful.
"""
def __init__(self):
true = dict(results_sarimax.friedman2_predict)
true['forecast_data'] = {
'consump': true['data']['consump'][-15:],
'm2': true['data']['m2'][-15:]
}
true['data'] = {
'consump': true['data']['consump'][:-15],
'm2': true['data']['m2'][:-15]
}
super(TestFriedmanForecast, self).__init__(true)
self.result = self.model.filter(self.result.params)
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_forecast(self):
end = len(self.true['data']['consump'])+15-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, exog=exog)[0],
self.true['forecast'], 3
)
def test_dynamic_forecast(self):
end = len(self.true['data']['consump'])+15-1
dynamic = len(self.true['data']['consump'])-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, dynamic=dynamic, exog=exog)[0],
self.true['dynamic_forecast'], 3
)
class SARIMAXCoverageTest(object):
def __init__(self, i, decimal=4, endog=None, *args, **kwargs):
# Dataset
if endog is None:
endog = results_sarimax.wpi1_data
# Loglikelihood, parameters
self.true_loglike = coverage_results.loc[i]['llf']
self.true_params = np.array([float(x) for x in coverage_results.loc[i]['parameters'].split(',')])
# Stata reports the standard deviation; make it the variance
self.true_params[-1] = self.true_params[-1]**2
# Test parameters
self.decimal = decimal
# Compare using the Hamilton representation and simple differencing
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(endog, *args, **kwargs)
def test_loglike(self):
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=0.7 * 10**(-self.decimal)
)
def test_start_params(self):
        # just a quick test that start_params isn't throwing an exception
        # (other than those related to invertibility)
self.model.enforce_stationarity = False
self.model.enforce_invertibility = False
self.model.start_params
self.model.enforce_stationarity = True
self.model.enforce_invertibility = True
def test_transform_untransform(self):
true_constrained = self.true_params
# Sometimes the parameters given by Stata are not stationary and / or
# invertible, so we need to skip those transformations for those
# parameter sets
self.model.update(self.true_params)
contracted_polynomial_seasonal_ar = self.model.polynomial_seasonal_ar[self.model.polynomial_seasonal_ar.nonzero()]
self.model.enforce_stationarity = (
(self.model.k_ar == 0 or tools.is_invertible(np.r_[1, -self.model.polynomial_ar[1:]])) and
(len(contracted_polynomial_seasonal_ar) <= 1 or tools.is_invertible(np.r_[1, -contracted_polynomial_seasonal_ar[1:]]))
)
contracted_polynomial_seasonal_ma = self.model.polynomial_seasonal_ma[self.model.polynomial_seasonal_ma.nonzero()]
self.model.enforce_invertibility = (
(self.model.k_ma == 0 or tools.is_invertible(np.r_[1, -self.model.polynomial_ma[1:]])) and
(len(contracted_polynomial_seasonal_ma) <= 1 or tools.is_invertible(np.r_[1, -contracted_polynomial_seasonal_ma[1:]]))
)
unconstrained = self.model.untransform_params(true_constrained)
constrained = self.model.transform_params(unconstrained)
assert_almost_equal(constrained, true_constrained, 4)
self.model.enforce_stationarity = True
self.model.enforce_invertibility = True
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
        # And make sure no exceptions are thrown calculating any of the
        # covariance matrix types
self.result.cov_params_default
self.result.cov_params_cs
        # Some of the models below have non-invertible parameters, which cause
        # problems with the reverse parameter transformation used in the
        # `cov_params_delta` procedure. This is unavoidable with these types of
        # parameters, and should not be considered a failure.
try:
self.result.cov_params_delta
except np.linalg.LinAlgError:
pass
except ValueError:
pass
self.result.cov_params_oim
self.result.cov_params_opg
def test_predict(self):
result = self.model.filter(self.true_params)
        # Test that predict does not throw exceptions and produces output of
        # the right shape
predict = result.predict()
assert_equal(predict.shape, (1, self.model.nobs))
predict = result.predict(start=10, end=20)
assert_equal(predict.shape, (1, 11))
predict = result.predict(start=10, end=20, dynamic=10)
assert_equal(predict.shape, (1, 11))
# Test forecasts
if self.model.k_exog == 0:
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10)
assert_equal(predict.shape, (1, 11))
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10)
forecast = result.forecast()
assert_equal(forecast.shape, (1, 1))
forecast = result.forecast(10)
assert_equal(forecast.shape, (1, 10))
else:
exog = np.r_[[0]*self.model.k_exog*11].reshape(11, self.model.k_exog)
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10,
exog=exog)
assert_equal(predict.shape, (1, 11))
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10,
exog=exog)
exog = np.r_[[0]*self.model.k_exog].reshape(1, self.model.k_exog)
forecast = result.forecast(exog=exog)
assert_equal(forecast.shape, (1, 1))
def test_init_keys_replicate(self):
mod1 = self.model
kwargs = self.model._get_init_kwds()
endog = mod1.data.orig_endog
exog = mod1.data.orig_exog
model2 = sarimax.SARIMAX(endog, exog, **kwargs)
res1 = self.model.filter(self.true_params)
res2 = model2.filter(self.true_params)
assert_allclose(res2.llf, res1.llf, rtol=1e-13)
class Test_ar(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
super(Test_ar, self).__init__(0, *args, **kwargs)
class Test_ar_as_polynomial(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = ([1,1,1],0,0)
super(Test_ar_as_polynomial, self).__init__(0, *args, **kwargs)
class Test_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3,0,0) noconstant vce(oim)
# save_results 2
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = 'c'
super(Test_ar_trend_c, self).__init__(1, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[0] = (1 - self.true_params[1:4].sum()) * self.true_params[0]
class Test_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3,0,0) noconstant vce(oim)
# save_results 3
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = 'ct'
super(Test_ar_trend_ct, self).__init__(2, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(3,0,0) noconstant vce(oim)
# save_results 4
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = [1,0,0,1]
super(Test_ar_trend_polynomial, self).__init__(3, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_ar_diff(SARIMAXCoverageTest):
# // AR and I(d): (p,d,0) x (0,0,0,0)
# arima wpi, arima(3,2,0) noconstant vce(oim)
# save_results 5
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,0)
super(Test_ar_diff, self).__init__(4, *args, **kwargs)
class Test_ar_seasonal_diff(SARIMAXCoverageTest):
# // AR and I(D): (p,0,0) x (0,D,0,s)
# arima wpi, arima(3,0,0) sarima(0,2,0,4) noconstant vce(oim)
# save_results 6
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_ar_seasonal_diff, self).__init__(5, *args, **kwargs)
class Test_ar_diffuse(SARIMAXCoverageTest):
# // AR and diffuse initialization
# arima wpi, arima(3,0,0) noconstant vce(oim) diffuse
# save_results 7
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_ar_diffuse, self).__init__(6, *args, **kwargs)
class Test_ar_no_enforce(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['enforce_stationarity'] = False
kwargs['enforce_invertibility'] = False
kwargs['initial_variance'] = 1e9
# kwargs['loglikelihood_burn'] = 0
super(Test_ar_no_enforce, self).__init__(6, *args, **kwargs)
# Reset loglikelihood burn, which gets automatically set to the number
# of states if enforce_stationarity = False
self.model.ssm.loglikelihood_burn = 0
def test_loglike(self):
        # The relaxed initialization used here (no enforced stationarity and a
        # large initial variance) gives a slightly different loglikelihood, so
        # just check that it's approximately the same
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=2
)
class Test_ar_exogenous(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3,0,0) noconstant vce(oim)
# save_results 8
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ar_exogenous, self).__init__(7, *args, **kwargs)
class Test_ar_exogenous_in_state(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3,0,0) noconstant vce(oim)
# save_results 8
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['mle_regression'] = False
super(Test_ar_exogenous_in_state, self).__init__(7, *args, **kwargs)
self.true_regression_coefficient = self.true_params[0]
self.true_params = self.true_params[1:]
def test_loglike(self):
# Regression in the state vector gives a different loglikelihood, so
# just check that it's approximately the same
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=2
)
def test_regression_coefficient(self):
# Test that the regression coefficient (estimated as the last filtered
# state estimate for the regression state) is the same as the Stata
        # MLE estimate
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.filter_results.filtered_state[3][-1],
self.true_regression_coefficient,
self.decimal
)
class Test_ma(SARIMAXCoverageTest):
# // MA: (0,0,q) x (0,0,0,0)
# arima wpi, arima(0,0,3) noconstant vce(oim)
# save_results 9
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
super(Test_ma, self).__init__(8, *args, **kwargs)
class Test_ma_as_polynomial(SARIMAXCoverageTest):
# // MA: (0,0,q) x (0,0,0,0)
# arima wpi, arima(0,0,3) noconstant vce(oim)
# save_results 9
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,[1,1,1])
super(Test_ma_as_polynomial, self).__init__(8, *args, **kwargs)
class Test_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(0,0,3) noconstant vce(oim)
# save_results 10
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = 'c'
super(Test_ma_trend_c, self).__init__(9, *args, **kwargs)
class Test_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(0,0,3) noconstant vce(oim)
# save_results 11
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = 'ct'
super(Test_ma_trend_ct, self).__init__(10, *args, **kwargs)
class Test_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(0,0,3) noconstant vce(oim)
# save_results 12
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = [1,0,0,1]
super(Test_ma_trend_polynomial, self).__init__(11, *args, **kwargs)
class Test_ma_diff(SARIMAXCoverageTest):
# // MA and I(d): (0,d,q) x (0,0,0,0)
# arima wpi, arima(0,2,3) noconstant vce(oim)
# save_results 13
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,3)
super(Test_ma_diff, self).__init__(12, *args, **kwargs)
class Test_ma_seasonal_diff(SARIMAXCoverageTest):
# // MA and I(D): (p,0,0) x (0,D,0,s)
# arima wpi, arima(0,0,3) sarima(0,2,0,4) noconstant vce(oim)
# save_results 14
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_ma_seasonal_diff, self).__init__(13, *args, **kwargs)
class Test_ma_diffuse(SARIMAXCoverageTest):
# // MA and diffuse initialization
# arima wpi, arima(0,0,3) noconstant vce(oim) diffuse
# save_results 15
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_ma_diffuse, self).__init__(14, *args, **kwargs)
class Test_ma_exogenous(SARIMAXCoverageTest):
# // MAX
# arima wpi x, arima(0,0,3) noconstant vce(oim)
# save_results 16
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ma_exogenous, self).__init__(15, *args, **kwargs)
class Test_arma(SARIMAXCoverageTest):
# // ARMA: (p,0,q) x (0,0,0,0)
# arima wpi, arima(3,0,3) noconstant vce(oim)
# save_results 17
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,3)
super(Test_arma, self).__init__(16, *args, **kwargs)
class Test_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3,0,2) noconstant vce(oim)
# save_results 18
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = 'c'
super(Test_arma_trend_c, self).__init__(17, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3,0,2) noconstant vce(oim)
# save_results 19
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = 'ct'
super(Test_arma_trend_ct, self).__init__(18, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(3,0,2) noconstant vce(oim)
# save_results 20
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = [1,0,0,1]
super(Test_arma_trend_polynomial, self).__init__(19, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_arma_diff(SARIMAXCoverageTest):
# // ARMA and I(d): (p,d,q) x (0,0,0,0)
# arima wpi, arima(3,2,2) noconstant vce(oim)
# save_results 21
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
super(Test_arma_diff, self).__init__(20, *args, **kwargs)
class Test_arma_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(D): (p,0,q) x (0,D,0,s)
# arima wpi, arima(3,0,2) sarima(0,2,0,4) noconstant vce(oim)
# save_results 22
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_arma_seasonal_diff, self).__init__(21, *args, **kwargs)
class Test_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(d) and I(D): (p,d,q) x (0,D,0,s)
# arima wpi, arima(3,2,2) sarima(0,2,0,4) noconstant vce(oim)
# save_results 23
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_arma_diff_seasonal_diff, self).__init__(22, *args, **kwargs)
class Test_arma_diffuse(SARIMAXCoverageTest):
# // ARMA and diffuse initialization
# arima wpi, arima(3,0,2) noconstant vce(oim) diffuse
# save_results 24
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_arma_diffuse, self).__init__(23, *args, **kwargs)
class Test_arma_exogenous(SARIMAXCoverageTest):
# // ARMAX
# arima wpi x, arima(3,0,2) noconstant vce(oim)
# save_results 25
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_arma_exogenous, self).__init__(24, *args, **kwargs)
class Test_seasonal_ar(SARIMAXCoverageTest):
# // SAR: (0,0,0) x (P,0,0,s)
# arima wpi, sarima(3,0,0,4) noconstant vce(oim)
# save_results 26
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
super(Test_seasonal_ar, self).__init__(25, *args, **kwargs)
class Test_seasonal_ar_as_polynomial(SARIMAXCoverageTest):
# // SAR: (0,0,0) x (P,0,0,s)
# arima wpi, sarima(3,0,0,4) noconstant vce(oim)
# save_results 26
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = ([1,1,1],0,0,4)
super(Test_seasonal_ar_as_polynomial, self).__init__(25, *args, **kwargs)
class Test_seasonal_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3,0,0,4) noconstant vce(oim)
# save_results 27
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = 'c'
super(Test_seasonal_ar_trend_c, self).__init__(26, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_seasonal_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3,0,0,4) noconstant vce(oim)
# save_results 28
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ar_trend_ct, self).__init__(27, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(3,0,0,4) noconstant vce(oim)
# save_results 29
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = [1,0,0,1]
super(Test_seasonal_ar_trend_polynomial, self).__init__(28, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_ar_diff(SARIMAXCoverageTest):
# // SAR and I(d): (0,d,0) x (P,0,0,s)
# arima wpi, arima(0,2,0) sarima(3,0,0,4) noconstant vce(oim)
# save_results 30
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,0,0,4)
super(Test_seasonal_ar_diff, self).__init__(29, *args, **kwargs)
class Test_seasonal_ar_seasonal_diff(SARIMAXCoverageTest):
# // SAR and I(D): (0,0,0) x (P,D,0,s)
# arima wpi, sarima(3,2,0,4) noconstant vce(oim)
# save_results 31
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,2,0,4)
super(Test_seasonal_ar_seasonal_diff, self).__init__(30, *args, **kwargs)
class Test_seasonal_ar_diffuse(SARIMAXCoverageTest):
# // SAR and diffuse initialization
# arima wpi, sarima(3,0,0,4) noconstant vce(oim) diffuse
# save_results 32
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_ar_diffuse, self).__init__(31, *args, **kwargs)
class Test_seasonal_ar_exogenous(SARIMAXCoverageTest):
# // SARX
# arima wpi x, sarima(3,0,0,4) noconstant vce(oim)
# save_results 33
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ar_exogenous, self).__init__(32, *args, **kwargs)
class Test_seasonal_ma(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0,0,3,4) noconstant vce(oim)
# save_results 34
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
super(Test_seasonal_ma, self).__init__(33, *args, **kwargs)
class Test_seasonal_ma_as_polynomial(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0,0,3,4) noconstant vce(oim)
# save_results 34
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,[1,1,1],4)
super(Test_seasonal_ma_as_polynomial, self).__init__(33, *args, **kwargs)
class Test_seasonal_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(0,0,3,4) noconstant vce(oim)
# save_results 35
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = 'c'
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_c, self).__init__(34, *args, **kwargs)
class Test_seasonal_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(0,0,3,4) noconstant vce(oim)
# save_results 36
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ma_trend_ct, self).__init__(35, *args, **kwargs)
class Test_seasonal_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(0,0,3,4) noconstant vce(oim)
# save_results 37
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = [1,0,0,1]
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_polynomial, self).__init__(36, *args, **kwargs)
class Test_seasonal_ma_diff(SARIMAXCoverageTest):
# // SMA and I(d): (0,d,0) x (0,0,Q,s)
# arima wpi, arima(0,2,0) sarima(0,0,3,4) noconstant vce(oim)
# save_results 38
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (0,0,3,4)
super(Test_seasonal_ma_diff, self).__init__(37, *args, **kwargs)
class Test_seasonal_ma_seasonal_diff(SARIMAXCoverageTest):
# // SMA and I(D): (0,0,0) x (0,D,Q,s)
# arima wpi, sarima(0,2,3,4) noconstant vce(oim)
# save_results 39
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,2,3,4)
super(Test_seasonal_ma_seasonal_diff, self).__init__(38, *args, **kwargs)
class Test_seasonal_ma_diffuse(SARIMAXCoverageTest):
# // SMA and diffuse initialization
# arima wpi, sarima(0,0,3,4) noconstant vce(oim) diffuse
# save_results 40
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_ma_diffuse, self).__init__(39, *args, **kwargs)
class Test_seasonal_ma_exogenous(SARIMAXCoverageTest):
# // SMAX
# arima wpi x, sarima(0,0,3,4) noconstant vce(oim)
# save_results 41
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ma_exogenous, self).__init__(40, *args, **kwargs)
class Test_seasonal_arma(SARIMAXCoverageTest):
# // SARMA: (0,0,0) x (P,0,Q,s)
# arima wpi, sarima(3,0,2,4) noconstant vce(oim)
# save_results 42
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
super(Test_seasonal_arma, self).__init__(41, *args, **kwargs)
class Test_seasonal_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3,0,2,4) noconstant vce(oim)
# save_results 43
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = 'c'
super(Test_seasonal_arma_trend_c, self).__init__(42, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_seasonal_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3,0,2,4) noconstant vce(oim)
# save_results 44
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_arma_trend_ct, self).__init__(43, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(3,0,2,4) noconstant vce(oim)
# save_results 45
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = [1,0,0,1]
kwargs['decimal'] = 3
super(Test_seasonal_arma_trend_polynomial, self).__init__(44, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
        # And make sure no exceptions are thrown calculating any of the
        # covariance matrix types
self.result.cov_params_default
# Known failure due to the complex step inducing non-stationary
# parameters, causing a failure in the solve_discrete_lyapunov call
# self.result.cov_params_cs
# self.result.cov_params_delta
self.result.cov_params_oim
self.result.cov_params_opg
class Test_seasonal_arma_diff(SARIMAXCoverageTest):
# // SARMA and I(d): (0,d,0) x (P,0,Q,s)
# arima wpi, arima(0,2,0) sarima(3,0,2,4) noconstant vce(oim)
# save_results 46
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,0,2,4)
super(Test_seasonal_arma_diff, self).__init__(45, *args, **kwargs)
class Test_seasonal_arma_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(D): (0,0,0) x (P,D,Q,s)
# arima wpi, sarima(3,2,2,4) noconstant vce(oim)
# save_results 47
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,2,2,4)
super(Test_seasonal_arma_seasonal_diff, self).__init__(46, *args, **kwargs)
class Test_seasonal_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(d) and I(D): (0,d,0) x (P,D,Q,s)
# arima wpi, arima(0,2,0) sarima(3,2,2,4) noconstant vce(oim)
# save_results 48
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,2,2,4)
super(Test_seasonal_arma_diff_seasonal_diff, self).__init__(47, *args, **kwargs)
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
        # And make sure no exceptions are thrown calculating any of the
        # covariance matrix types
self.result.cov_params_default
# Known failure due to the complex step inducing non-stationary
# parameters, causing a failure in the solve_discrete_lyapunov call
# self.result.cov_params_cs
        # self.result.cov_params_delta
self.result.cov_params_oim
self.result.cov_params_opg
class Test_seasonal_arma_diffuse(SARIMAXCoverageTest):
# // SARMA and diffuse initialization
# arima wpi, sarima(3,0,2,4) noconstant vce(oim) diffuse
# save_results 49
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['decimal'] = 3
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_arma_diffuse, self).__init__(48, *args, **kwargs)
class Test_seasonal_arma_exogenous(SARIMAXCoverageTest):
# // SARMAX
# arima wpi x, sarima(3,0,2,4) noconstant vce(oim)
# save_results 50
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
endog = results_sarimax.wpi1_data
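        # artificial exogenous regressor: the squared fractional part of the
        # endogenous series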
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_arma_exogenous, self).__init__(49, *args, **kwargs)
class Test_sarimax_exogenous(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim)
# save_results 51
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_sarimax_exogenous, self).__init__(50, *args, **kwargs)
class Test_sarimax_exogenous_not_hamilton(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim)
# save_results 51
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['hamilton_representation'] = False
kwargs['simple_differencing'] = False
super(Test_sarimax_exogenous_not_hamilton, self).__init__(50, *args, **kwargs)
class Test_sarimax_exogenous_diffuse(SARIMAXCoverageTest):
# // SARIMAX and exogenous diffuse
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim) diffuse
# save_results 52
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['decimal'] = 2
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_sarimax_exogenous_diffuse, self).__init__(51, *args, **kwargs)
class Test_arma_exog_trend_polynomial_missing(SARIMAXCoverageTest):
# // ARMA and exogenous and trend polynomial and missing
# gen wpi2 = wpi
# replace wpi2 = . in 10/19
# arima wpi2 x c t3, arima(3,0,2) noconstant vce(oim)
# save_results 53
def __init__(self, *args, **kwargs):
endog = np.r_[results_sarimax.wpi1_data]
# Note we're using the non-missing exog data
kwargs['exog'] = ((endog - np.floor(endog))**2)[1:]
endog[9:19] = np.nan
endog = endog[1:] - endog[:-1]
endog[9] = np.nan
kwargs['order'] = (3,0,2)
kwargs['trend'] = [0,0,0,1]
kwargs['decimal'] = 1
super(Test_arma_exog_trend_polynomial_missing, self).__init__(52, endog=endog, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[0] = (1 - self.true_params[2:5].sum()) * self.true_params[0]
# Miscellaneous coverage tests
def test_simple_time_varying():
    # This tests time-varying parameter regression in a case where the
    # parameters are in fact constant and the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
# Ignore the warning that MLE doesn't converge
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
# Test that the estimated variances of the errors are essentially zero
assert_almost_equal(res.params, [0,0], 7)
# Test that the time-varying coefficients are all 0.5 (except the first
# one)
assert_almost_equal(res.filter_results.filtered_state[0][1:], [0.5]*99, 9)
def test_invalid_time_varying():
assert_raises(ValueError, sarimax.SARIMAX, endog=[1,2,3], mle_regression=True, time_varying_regression=True)
def test_manual_stationary_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3,0,0))
res1 = mod1.filter([0.5,0.2,0.1,1])
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3,0,0))
mod2.ssm.initialize_known(res1.filter_results.initial_state,
res1.filter_results.initial_state_cov)
mod2.initialize_state() # a noop in this case (include for coverage)
res2 = mod2.filter([0.5,0.2,0.1,1])
# Create a third model with "known" initialization, but specified in kwargs
mod3 = sarimax.SARIMAX(endog, order=(3,0,0),
initialization='known',
initial_state=res1.filter_results.initial_state,
initial_state_cov=res1.filter_results.initial_state_cov)
res3 = mod3.filter([0.5,0.2,0.1,1])
    # Create the fourth model with stationary initialization specified in kwargs
mod4 = sarimax.SARIMAX(endog, order=(3,0,0), initialization='stationary')
res4 = mod4.filter([0.5,0.2,0.1,1])
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res2.filter_results.filtered_state)
assert_almost_equal(res1.llf, res3.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res3.filter_results.filtered_state)
assert_almost_equal(res1.llf, res4.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res4.filter_results.filtered_state)
def test_manual_approximate_diffuse_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3,0,0))
mod1.ssm.initialize_approximate_diffuse(1e9)
res1 = mod1.filter([0.5,0.2,0.1,1])
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3,0,0))
mod2.ssm.initialize_known(res1.filter_results.initial_state,
res1.filter_results.initial_state_cov)
mod2.initialize_state() # a noop in this case (include for coverage)
res2 = mod2.filter([0.5,0.2,0.1,1])
# Create a third model with "known" initialization, but specified in kwargs
mod3 = sarimax.SARIMAX(endog, order=(3,0,0),
initialization='known',
initial_state=res1.filter_results.initial_state,
initial_state_cov=res1.filter_results.initial_state_cov)
res3 = mod3.filter([0.5,0.2,0.1,1])
    # Create the fourth model with approximate diffuse initialization specified
# in kwargs
mod4 = sarimax.SARIMAX(endog, order=(3,0,0),
initialization='approximate_diffuse',
initial_variance=1e9)
res4 = mod4.filter([0.5,0.2,0.1,1])
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res2.filter_results.filtered_state)
assert_almost_equal(res1.llf, res3.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res3.filter_results.filtered_state)
assert_almost_equal(res1.llf, res4.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res4.filter_results.filtered_state)
def test_results():
endog = results_sarimax.wpi1_data
mod = sarimax.SARIMAX(endog, order=(1,0,1))
res = mod.filter([0.5,-0.5,1], cov_type='oim')
assert_almost_equal(res.arroots, 2.)
assert_almost_equal(res.maroots, 2.)
assert_almost_equal(res.arfreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.mafreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.arparams, [0.5])
assert_almost_equal(res.maparams, [-0.5])
| bsd-3-clause |
pluskid/mxnet | example/reinforcement-learning/ddpg/strategies.py | 15 | 1705 | import numpy as np
class BaseStrategy(object):
"""
Base class of exploration strategy.
"""
def get_action(self, obs, policy):
raise NotImplementedError
def reset(self):
pass
class OUStrategy(BaseStrategy):
"""
Ornstein-Uhlenbeck process: dxt = theta * (mu - xt) * dt + sigma * dWt
where Wt denotes the Wiener process.
"""
def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
self.mu = mu
self.theta = theta
self.sigma = sigma
self.action_space = env_spec.action_space
self.state = np.ones(self.action_space.flat_dim) * self.mu
def evolve_state(self):
x = self.state
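        # Euler discretization of the OU SDE with dt = 1, so the Wiener
        # increment reduces to a standard normal draw per dimension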
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
def reset(self):
self.state = np.ones(self.action_space.flat_dim) * self.mu
def get_action(self, obs, policy):
# get_action accepts a 2D tensor with one row
obs = obs.reshape((1, -1))
action = policy.get_action(obs)
increment = self.evolve_state()
return np.clip(action + increment,
self.action_space.low,
self.action_space.high)
if __name__ == "__main__":
class Env1(object):
def __init__(self):
self.action_space = Env2()
class Env2(object):
def __init__(self):
self.flat_dim = 2
env_spec = Env1()
test = OUStrategy(env_spec)
states = []
for i in range(1000):
states.append(test.evolve_state()[0])
import matplotlib.pyplot as plt
plt.plot(states)
plt.show()
| apache-2.0 |
samklr/HiBench | bin/report_gen_plot.py | 22 | 5011 | #!/usr/bin/env python
#coding: utf-8
import sys, os, re
from pprint import pprint
from collections import defaultdict, namedtuple
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
RecordRaw=namedtuple("RecordRaw", "type duration data_size throughput_total throughput_per_node")
Record=namedtuple("Record", "type language duration data_size throughput_total throughput_per_node")
def human_readable_size(n):
"convert number into human readable string"
if n<1000: return str(n)
if n<800000: return "%.3fK" % (n/1000.0)
if n<800000000: return "%.3fM" % (n/1000000.0)
if n<800000000000: return "%.3fG" % (n/1000000000.0)
return "%.3fT" % (n/1000000000000.0)
def group_by_type(datas):
groups = defaultdict(dict)
for i in datas:
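        # Split the CamelCase type name into words (e.g. a hypothetical
        # "JavaWordcount" -> ["Java", "Wordcount"]): the first word is the
        # language prefix, the remainder identifies the workload.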
words = re.sub(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r' \1', i.type).split()
prefix = words[0].lower()
suffix = "_".join([x.lower() for x in words[1:]])
groups[suffix][prefix] = Record(type = "".join(words[1:]),
language = prefix,
                                        duration = i.duration,
data_size = i.data_size,
throughput_total = i.throughput_total,
throughput_per_node = i.throughput_per_node
)
return dict(groups)
def report_plot(fn):
if not os.path.isfile(fn):
print "Failed to find `sparkbench.report`"
sys.exit(1)
with open(fn) as f:
data = [x.split() for x in f.readlines()[1:] if x.strip() and not x.strip().startswith('#')]
pprint(data, width=300)
groups = group_by_type([RecordRaw(type = x[0],
data_size = int(x[3]),
                                      duration = float(x[4]),
throughput_total = int(x[5]) / 1024.0 / 1024,
throughput_per_node = int(x[6]) / 1024.0 /1024
) for x in data])
#print groups
base_dir = os.path.dirname(fn)
plot(groups, "Seconds of durtations (Less is better)", "Seconds", "durtation", os.path.join(base_dir, "durtation.png"))
# plot(groups, "Throughput in total (Higher is better)", "MB/s", "throughput_total", os.path.join(base_dir, "throughput_total.png"))
# plot(groups, "Throughput per node (Higher is better)", "MB/s", "throughput_per_node", os.path.join(base_dir, "throughput_per_node.png"))
def plot(groups, title="Duration in seconds", ylabel="Seconds", value_field="duration", fig_fn="foo.png"):
# plot it
keys = groups.keys()
languages = sorted(reduce(lambda x,y: x.union(y), [set([groups[x][y].language for y in groups[x]]) for x in groups]))
width = 0.15
rects = []
fig = plt.figure()
ax = plt.axes()
colors='rgbcymw'
# NCURVES=10
# curves = [np.random.random(20) for i in range(NCURVES)]
# values = range(NCURVES)
# jet = colors.Colormap('jet')
# cNorm = colors.Normalize(vmin=0, vmax=values[-1])
# scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
patterns = ('-', '+', 'x', '\\', '/', '*', '.', 'O')
for idx, lang in enumerate(languages):
rects.append(ax.bar([x + width * (idx + 1) for x in range(len(keys))], # x index
[getattr(groups[x][lang], value_field) if x in groups and groups[x].has_key(lang) else 0 for x in keys], # value
width,
color = colors[idx],
hatch = patterns[idx]
) # width
)
# Set the tick labels font
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Arial')
label.set_fontsize(24)
ax.set_ylabel(ylabel, fontname="Arial", size="32")
ax.set_title(title, fontname="Arial", size="44")
x_axis_offset = len(languages)* width /2.0
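    # Each group's bars start at x + width, so the group's midpoint is at
    # x + width + x_axis_offset; use that to center the tick labels.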
ax.set_xticks([(x + width + x_axis_offset) for x in range(len(keys))])
ax.set_xticklabels(["%s \n@%s" % (x, human_readable_size(groups[x].values()[0].data_size)) for x in keys])
ax.grid(True)
ax.legend([x[0] for x in rects],
languages)
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d' % int(height),
ha='center', va='bottom')
# [autolabel(x) for x in rects]
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
plt.savefig(fig_fn, dpi=100)
if __name__ == "__main__":
try:
default_report_fn = sys.argv[1]
except:
default_report_fn = os.path.join(os.path.dirname(__file__), "..", "sparkbench.report")
report_plot(default_report_fn)
| apache-2.0 |
cancan101/nolearn | nolearn/lasagne/tests/conftest.py | 4 | 4595 | import numpy as np
import pytest
from sklearn.datasets import load_boston
from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from lasagne.layers import Conv2DLayer
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.layers import MaxPool2DLayer
from lasagne.layers import NonlinearityLayer
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum
@pytest.fixture(scope='session')
def NeuralNet():
from nolearn.lasagne import NeuralNet
return NeuralNet
@pytest.fixture
def nn(NeuralNet):
return NeuralNet([('input', object())], input_shape=(10, 10))
@pytest.fixture(scope='session')
def mnist():
dataset = fetch_mldata('mnist-original')
X, y = dataset.data, dataset.target
X = X.astype(np.float32) / 255.0
y = y.astype(np.int32)
return shuffle(X, y, random_state=42)
@pytest.fixture(scope='session')
def boston():
dataset = load_boston()
X, y = dataset.data, dataset.target
# X, y = make_regression(n_samples=100000, n_features=13)
X = StandardScaler().fit_transform(X).astype(np.float32)
y = y.reshape(-1, 1).astype(np.float32)
return shuffle(X, y, random_state=42)
class _OnEpochFinished:
def __call__(self, nn, train_history):
self.train_history = train_history
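        # Raising StopIteration from an on_epoch_finished handler is how
        # nolearn requests early stopping; this caps the test runs at two
        # epochs.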
if len(train_history) > 1:
raise StopIteration()
@pytest.fixture(scope='session')
def X_train(mnist):
X, y = mnist
return X[:10000].reshape(-1, 1, 28, 28)
@pytest.fixture(scope='session')
def y_train(mnist):
X, y = mnist
return y[:10000]
@pytest.fixture(scope='session')
def X_test(mnist):
X, y = mnist
return X[60000:].reshape(-1, 1, 28, 28)
@pytest.fixture(scope='session')
def y_pred(net_fitted, X_test):
return net_fitted.predict(X_test)
@pytest.fixture(scope='session')
def net(NeuralNet):
l = InputLayer(shape=(None, 1, 28, 28))
l = Conv2DLayer(l, name='conv1', filter_size=(5, 5), num_filters=8)
l = MaxPool2DLayer(l, name='pool1', pool_size=(2, 2))
l = Conv2DLayer(l, name='conv2', filter_size=(5, 5), num_filters=8)
l = MaxPool2DLayer(l, name='pool2', pool_size=(2, 2))
l = DenseLayer(l, name='hidden1', num_units=128)
l = DenseLayer(l, name='output', nonlinearity=softmax, num_units=10)
return NeuralNet(
layers=l,
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=5,
on_epoch_finished=[_OnEpochFinished()],
verbose=99,
)
@pytest.fixture(scope='session')
def net_fitted(net, X_train, y_train):
return net.fit(X_train, y_train)
@pytest.fixture(scope='session')
def net_color_non_square(NeuralNet):
l = InputLayer(shape=(None, 3, 20, 28))
l = Conv2DLayer(l, name='conv1', filter_size=(5, 5), num_filters=1)
l = MaxPool2DLayer(l, name='pool1', pool_size=(2, 2))
l = Conv2DLayer(l, name='conv2', filter_size=(5, 5), num_filters=8)
l = MaxPool2DLayer(l, name='pool2', pool_size=(2, 2))
l = DenseLayer(l, name='hidden1', num_units=128)
l = DenseLayer(l, name='output', nonlinearity=softmax, num_units=10)
net = NeuralNet(
layers=l,
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=1,
)
net.initialize()
return net
@pytest.fixture(scope='session')
def net_with_nonlinearity_layer(NeuralNet):
l = InputLayer(shape=(None, 1, 28, 28))
l = Conv2DLayer(l, name='conv1', filter_size=(5, 5), num_filters=8)
l = MaxPool2DLayer(l, name='pool1', pool_size=(2, 2))
l = Conv2DLayer(l, name='conv2', filter_size=(5, 5), num_filters=8)
l = MaxPool2DLayer(l, name='pool2', pool_size=(2, 2))
l = DenseLayer(l, name='hidden1', num_units=128)
l = DenseLayer(l, name='output', nonlinearity=softmax, num_units=10)
l = NonlinearityLayer(l)
net = NeuralNet(
layers=l,
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=5,
on_epoch_finished=[_OnEpochFinished()],
verbose=99,
)
net.initialize()
return net
@pytest.fixture
def net_no_conv(NeuralNet):
l = InputLayer(shape=(None, 100))
l = DenseLayer(l, name='output', nonlinearity=softmax, num_units=10)
return NeuralNet(
layers=l,
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=1,
verbose=99,
)
| mit |
herilalaina/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 49 | 3847 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
# First run
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0))
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0))
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
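# Evaluate the LML over a (length-scale, noise-level) grid; hyperparameters
# are passed in log-space, with the first entry holding the signal variance
# fixed (0.36 here).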
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
level = np.around(np.logspace(np.log10(vmin), np.log10(vmax), 50), decimals=1)
plt.contour(Theta0, Theta1, -LML,
levels=level, norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
Aasmi/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
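        # Note: the unconditional return suppresses signature output for the
        # Sphinx renderer; the branch below it is never reached.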
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
OpringaoDoTurno/airflow | airflow/contrib/operators/hive_to_dynamodb.py | 15 | 3701 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
from airflow.hooks.hive_hooks import HiveServer2Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class HiveToDynamoDBTransferOperator(BaseOperator):
"""
    Moves data from Hive to DynamoDB. Note that, for now, the data is loaded
    into memory before being pushed to DynamoDB, so this operator should only
    be used for smallish amounts of data.
:param sql: SQL query to execute against the hive database
:type sql: str
:param table_name: target DynamoDB table
:type table_name: str
:param table_keys: partition key and sort key
:type table_keys: list
:param pre_process: implement pre-processing of source data
:type pre_process: function
:param pre_process_args: list of pre_process function arguments
:type pre_process_args: list
:param pre_process_kwargs: dict of pre_process function arguments
:type pre_process_kwargs: dict
:param region_name: aws region name (example: us-east-1)
:type region_name: str
:param schema: hive database schema
:type schema: str
:param hiveserver2_conn_id: source hive connection
:type hiveserver2_conn_id: str
:param aws_conn_id: aws connection
:type aws_conn_id: str
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
table_name,
table_keys,
pre_process=None,
pre_process_args=None,
pre_process_kwargs=None,
region_name=None,
schema='default',
hiveserver2_conn_id='hiveserver2_default',
aws_conn_id='aws_default',
*args, **kwargs):
super(HiveToDynamoDBTransferOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.table_name = table_name
self.table_keys = table_keys
self.pre_process = pre_process
self.pre_process_args = pre_process_args
self.pre_process_kwargs = pre_process_kwargs
self.region_name = region_name
self.schema = schema
self.hiveserver2_conn_id = hiveserver2_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
logging.info('Extracting data from Hive')
logging.info(self.sql)
data = hive.get_pandas_df(self.sql, schema=self.schema)
dynamodb = AwsDynamoDBHook(aws_conn_id=self.aws_conn_id,
table_name=self.table_name, table_keys=self.table_keys, region_name=self.region_name)
logging.info('Inserting rows into dynamodb')
if self.pre_process is None:
dynamodb.write_batch_data(
json.loads(data.to_json(orient='records')))
else:
dynamodb.write_batch_data(
self.pre_process(data=data, args=self.pre_process_args, kwargs=self.pre_process_kwargs))
logging.info('Done.')
| apache-2.0 |
ChinaQuants/bokeh | bokeh/_legacy_charts/tests/test_legacy_data_adapter.py | 6 | 3293 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh._legacy_charts import DataAdapter
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDataAdapter(unittest.TestCase):
def setUp(self):
self._values = OrderedDict()
self._values['first'] = [2., 5., 3.]
self._values['second'] = [4., 1., 4.]
self._values['third'] = [6., 4., 3.]
def test_list(self):
values = list(self._values.values())
da = DataAdapter(values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_array(self):
values = np.array(list(self._values.values()))
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_pandas(self):
values = pd.DataFrame(self._values)
da = DataAdapter(values)
# TODO: THIS SHOULD BE FIXED..
#self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
# We expect data adapter index to be the same as the underlying pandas
# object and not the default created by DataAdapter
self.assertEqual(da.index, [0, 1, 2])
def test_ordered_dict(self):
da = DataAdapter(self._values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_blaze_data_no_fields(self):
import blaze
valuesdf = pd.DataFrame(self._values)
values = blaze.Data(valuesdf)
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, [0, 1, 2])
xs, _values = DataAdapter.get_index_and_data(values, None)
assert_array_equal([0,1,2], xs)
| bsd-3-clause |
colettace/wnd-charm | wndcharm/__init__.py | 1 | 4542 | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (C) 2015 National Institutes of Health
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Written by: Christopher Coletta (github.com/colettace)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__version__ = "unknown"
from wndcharm import *
try:
from _version import __version__
except ImportError:
# We're running in a tree that doesn't have a _version.py, so we don't know what our version is.
pass
try:
from _git_hash import __git_hash__
__version__ = __version__+ '+' + __git_hash__
except:
pass
class _diagnostics( object ):
"""Report the versions of various Python packages WND-CHARM
depends on/is often used with"""
def __init__( self ):
self.module_list = ['wndcharm', 'numpy', 'scipy', 'matplotlib', 'sklearn', \
'skimage', 'IPython', 'tifffile', 'PIL', 'pandas']
def get_package_versions( self ):
"""Runs through self.module_list, tries to import,
then gets .__version__ or .VERSION"""
ret = []
import sys
ret.append( ('python', sys.version ) )
for name in self.module_list:
m = None
ver = None
try: # 1. can we import it?
m = __import__( name )
try: #2. does it have a __version__?
ver = m.__version__
except AttributeError:
try: # 3. Is it PIL which has a .VERSION instead?
ver = m.VERSION
except AttributeError:
ver = 'version not available'
except ImportError:
pass
ret.append( ( name, ver ) )
return ret
def __call__( self ):
return self.get_package_versions()
def __str__( self ):
outstr = "WND-CHARM Python API Diagnostics\n"
outstr += "================================\n"
from sys import executable
outstr += "Executable:" + '\n\t' + str( executable ) + '\n'
from os import getenv
outstr += 'PYTHONPATH environment variable:\n\t' + \
getenv( 'PYTHONPATH', '<unset>') + '\n'
import wndcharm
outstr += 'WND-CHARM library path:\n\t' + wndcharm.__file__ + '\n'
outstr += 'Package versions:\n'
retval = self.get_package_versions()
for name, ver in retval:
outstr += '\t' + str( name ).ljust(10) + '\t' + str( ver ).replace( '\n', ' ') + '\n'
return outstr
diagnostics = _diagnostics()
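# Usage (illustrative): print(diagnostics) prints the formatted report, while
# diagnostics() returns the raw (package, version) pairs.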
# The numbers *must* be consistent with what's defined in wndchrm C-codebase.
feature_vector_major_version = 3
# Feature vector lengths in current version
# #define NUM_LC_FEATURES 4059
# #define NUM_L_FEATURES 2919
# #define NUM_C_FEATURES 2199
# #define NUM_DEF_FEATURES 1059
# These are definitions for Version 2 features.
feature_vector_minor_version_from_num_features = {
1059:1,
2919:2,
2199:3,
4059:4
}
# // original lengths prior to Version 2:
# // no Gini coefficient, no inverse otsu features
# // #define NUM_LC_FEATURES 4008
# // #define NUM_L_FEATURES 2873
# // #define NUM_C_FEATURES 2160
# // #define NUM_DEF_FEATURES 1025
feature_vector_minor_version_from_num_features_v1 = {
1025:1,
2873:2,
2160:3,
4008:4
}
feature_vector_minor_version_from_vector_type = {
'short':1,
'long':2,
'short_color':3,
'long_color':4
}
feature_vector_num_features_from_vector_type = {
'short':1059,
'long':2919,
'short_color':2199,
'long_color':4059
}
| lgpl-2.1 |
adamgreenhall/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
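        # squared Mahalanobis distance of every sample under the current
        # (location, covariance) estimates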
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): keep only 2 candidates per subset so the
            # retried allocation is a fifth of the size.
            n_best_sub = 2
            n_best_tot = n_subsets * n_best_sub
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
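# Illustrative usage sketch (editorial addition): fitting MinCovDet on synthetic
# contaminated data and reading the robust estimates.  Only the public API
# defined above is used; the data, contamination level and seed are made up.
def _min_cov_det_demo():  # pragma: no cover
    rng = np.random.RandomState(42)
    X_demo = rng.randn(200, 2)
    X_demo[:20] += 5.0  # shift a few observations to act as gross outliers
    mcd = MinCovDet(random_state=42).fit(X_demo)
    # The robust location/covariance should stay close to (0, I) despite the
    # outliers; support_ flags the observations used for the final estimate.
    return mcd.location_, mcd.covariance_, mcd.support_.sum(), mcd.dist_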
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
N Y Y Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
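# Editorial note: a sample's color index is the binary encoding of its label set,
# computed in plot_2d below as (Y * [1, 2, 4]).sum(axis=1); e.g. labels {1, 2}
# give index 3 (purple) and labels {2, 3} give index 6 (green).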
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
ksirg/pyKMLib | examples/gpu_kernels.py | 1 | 12923 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 13:18:51 2013
@author: Krszysztof Sopyła
@email: [email protected]
@githubuser: ksirg
@license: MIT
"""
"""
It demonstrates the usage of pycuda.
"""
import numpy as np
import scipy.sparse as sp
from sklearn import datasets
import sys
sys.path.append("../pyKMLib/")
import SparseFormats as spf
import Kernels as ker
#load and reorganize the dataset
#dsName = 'Data/glass.scale_binary'
#dsName ='Data/w8a'
#dsName = 'Data/glass.scale.txt'
dsName = 'Data/mnist.scale'
#X, Y = datasets.load_svmlight_file('Data/toy_2d_20_ones.train',dtype=np.float32)
#X, Y = datasets.load_svmlight_file('Data/toy_2d_20_order.train',dtype=np.float32)
print "Dataset: ",dsName
X, Y = datasets.load_svmlight_file(dsName,dtype=np.float32)
Y=Y.astype(np.float32)
#used for showing some elements in results array
skip= 30
##reorder the dataset and compute class statistics
#get all distinct class labels, e.g. 0,1,4,7 - not all might occur
cls, idx_cls = np.unique(Y, return_inverse=True)
#number of different classes
nr_cls = cls.shape[0]
#create new consecutive class numbers from 0 to nr_cls
new_classes = np.arange(0,nr_cls,dtype=np.int32)
#remap class labels, change from original to new_classes
y_map = new_classes[idx_cls]
#sort by class label,
order =np.argsort(a=y_map,kind='mergesort')
#reorder dataset, group classes together
x = X.todense()
x = x[order,:]
X = sp.csr_matrix(x)
Y = Y[order]
y_map=y_map[order]
### y mapped to binary
#which class should be mapped
bin_cls = np.array([0,1],dtype=np.int32)
#bin_map = np.zeros(new_classes.shape)
y_map_bin = np.zeros_like(y_map,dtype=np.float32)
y_map_bin[y_map==bin_cls[0]] =-1
y_map_bin[y_map==bin_cls[1]] = 1
#first class is mapped to -1, second to 1
#bin_map[bin_cls]=np.array([-1,1])
#for i,val in enumerate(new_classes):
# y_map_bin[y_map==i]=bin_map[i]
count_cls=np.bincount(y_map).astype(np.int32)
start_cls = count_cls.cumsum()
start_cls=np.insert(start_cls,0,0).astype(np.int32)
i=start_cls[ bin_cls[0] ]+1
j=start_cls[ bin_cls[1] ]+1
print i,j
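# Editorial note: i and j each pick one sample from the two selected binary
# classes; the CPU reference below and the GPU kernels further down evaluate the
# i-th and j-th (label-scaled) kernel columns against the whole dataset and
# compare the results.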
#---------------------
num_el,dim = X.shape
gamma = 0.5
threadsPerRow = 1
prefetch=2
rbf = ker.RBF()
rbf.gamma=gamma
rbf.init(X,Y)
vecI = X[i,:].toarray()
vecJ = X[j,:].toarray()
import time
#t0=time.clock()
t0=time.time()
#ki =Y[i]*Y* rbf.K_vec(vecI).flatten()
#kj =Y[j]*Y*rbf.K_vec(vecJ).flatten()
ki =y_map_bin[i]*y_map_bin* rbf.K_vec(vecI).flatten()
kj =y_map_bin[j]*y_map_bin*rbf.K_vec(vecJ).flatten()
#t1=time.clock()
t1=time.time()
print 'CPU RBF takes',t1-t0, 's'
kij= np.array( [ki,kj]).flatten()
print 'Total sum:',kij.sum()
print kij[0:1000:skip]
import pycuda.driver as cuda
import pycuda.tools
import pycuda.autoinit
from pycuda.compiler import SourceModule
##----------------------------------------------
# Ellpack gpu kernel
v,c,r=spf.csr2ellpack(X,align=prefetch)
sd=rbf.Diag
self_dot = rbf.Xsquare
results = np.zeros(2*num_el,dtype=np.float32)
kernel_file = "ellpackKernel.cu"
with open (kernel_file,"r") as CudaFile:
data = CudaFile.read();
#copy memory to device
g_val = cuda.to_device(v)
g_col = cuda.to_device(c)
g_r = cuda.to_device(r)
g_self = cuda.to_device(self_dot)
g_y = cuda.to_device(y_map_bin)
g_out = cuda.to_device(results)
#compile module
#module = SourceModule(data,cache_dir='./nvcc_cache',keep=True,no_extern_c=True)
module = SourceModule(data,keep=True,no_extern_c=True,options=["--ptxas-options=-v"])
#get module function
func = module.get_function('rbfEllpackILPcol2')
#get module texture
vecI_tex=module.get_texref('VecI_TexRef')
vecJ_tex=module.get_texref('VecJ_TexRef')
#copy data to tex ref
g_vecI = cuda.to_device(vecI)
vecI_tex.set_address(g_vecI,vecI.nbytes)
g_vecJ = cuda.to_device(vecJ)
vecJ_tex.set_address(g_vecJ,vecJ.nbytes)
#texture list, necessary for pycuda launch function
texList=[vecI_tex,vecJ_tex]
tpb=128#block size, power of 2
#grid size, number of blocks
bpg =int( np.ceil( (threadsPerRow*num_el+0.0)/tpb ))
g_num_el = np.int32(num_el)
g_i = np.int32(i)
g_j = np.int32(j)
g_gamma = np.float32(gamma)
start_event = cuda.Event()
stop_event = cuda.Event()
start_event.record()
func(g_val,g_col,g_r,g_self,g_y,g_out,g_num_el,g_i,g_j,g_gamma,block=(tpb,1,1),grid=(bpg,1),texrefs=texList)
stop_event.record()
stop_event.synchronize()
cuTime=stop_event.time_since(start_event)
cuda.memcpy_dtoh(results,g_out)
resultsEll = np.copy(results)
print "\nEllpack time ",cuTime*1e-3
print 'Total sum:',resultsEll.sum()
print "Error to CPU:",np.square(resultsEll-kij).sum()
print resultsEll[0:1000:skip]
#print results
##------------------------------------------
# SERTILP gpu kernel
sliceSize=64
threadsPerRow=2
prefetch=2
minAlign=64 #8
v,c,r,ss=spf.csr2sertilp(X,
threadsPerRow=threadsPerRow,
prefetch=prefetch,
sliceSize=sliceSize,
minAlign=minAlign)
sd=rbf.Diag
self_dot = rbf.Xsquare
results = np.zeros(2*num_el,dtype=np.float32)
kernel_file = "sertilpMulti2Col.cu"
with open (kernel_file,"r") as CudaFile:
data = CudaFile.read();
#compile module
#module = SourceModule(data,cache_dir='./nvcc_cache',keep=True,no_extern_c=True)
module = SourceModule(data,keep=True,no_extern_c=True,options=["--ptxas-options=-v"])
#get module function
func = module.get_function('rbfSERTILP2multi')
#class align to sliceSize
cls_align=sliceSize
cls1_n = count_cls[bin_cls[0]]
align_cls1_n = cls1_n+(cls_align-cls1_n%cls_align)%cls_align
cls2_n = count_cls[bin_cls[1]]
align_cls2_n = cls2_n+(cls_align-cls2_n%cls_align)%cls_align
#block size, power of 2
tpb=sliceSize*threadsPerRow
#grid size, number of blocks
bpg =np.ceil(((align_cls1_n+align_cls2_n)*threadsPerRow+0.0)/(tpb))
bpg=int(bpg)
#get module texture
vecI_tex=module.get_texref('VecI_TexRef')
vecJ_tex=module.get_texref('VecJ_TexRef')
#copy data to tex ref
g_vecI = cuda.to_device(vecI)
vecI_tex.set_address(g_vecI,vecI.nbytes)
g_vecJ = cuda.to_device(vecJ)
vecJ_tex.set_address(g_vecJ,vecJ.nbytes)
texList=[vecI_tex,vecJ_tex]
#copy memory to device
g_val = cuda.to_device(v)
g_col = cuda.to_device(c)
g_r = cuda.to_device(r)
g_slice = cuda.to_device(ss)
g_self = cuda.to_device(self_dot)
g_y = cuda.to_device(y_map_bin)
g_out = cuda.to_device(results)
g_num_el = np.int32(num_el)
align = np.ceil( 1.0*sliceSize*threadsPerRow/minAlign)*minAlign
g_align = np.int32(align)
g_i = np.int32(i)
g_j = np.int32(j)
g_i_ds= np.int32(i)
g_j_ds= np.int32(j)
g_cls1N_aligned = np.int32(align_cls1_n)
#gamma, copy to constant memory
(g_gamma,gsize)=module.get_global('GAMMA')
cuda.memcpy_htod(g_gamma, np.float32(gamma) )
g_cls_start = cuda.to_device(start_cls)
g_cls_count = cuda.to_device(count_cls)
g_cls = cuda.to_device(bin_cls)
#start_event = cuda.Event()
#stop_event = cuda.Event()
start_event.record()
func(g_val,
g_col,
g_r,
g_slice,
g_self,
g_y,
g_out,
g_num_el,
g_align,
g_i,
g_j,
g_i_ds,
g_j_ds,
g_cls1N_aligned,
g_cls_start,
g_cls_count,
g_cls,
block=(tpb,1,1),grid=(bpg,1),texrefs=texList)
stop_event.record()
stop_event.synchronize()
cuTime=stop_event.time_since(start_event)
cuda.memcpy_dtoh(results,g_out)
resultsSEll = np.copy(results)
print "\nSERTILP time ",cuTime*1e-3
print 'Total sum:',resultsSEll.sum()
print "Error to CPU:",np.square(resultsSEll-kij).sum()
print "Error to ELlpack:",np.square(resultsSEll-resultsEll).sum()
print resultsSEll[0:1000:skip]
##------------------------------------------
# SERTILP class-aligned gpu kernel
sliceSize=64
threadsPerRow=2
prefetch=2
minAlign=64 #8
v,c,r,ss,cls_slice=spf.csr2sertilp_class(X,y_map,
threadsPerRow=threadsPerRow,
prefetch=prefetch,
sliceSize=sliceSize,
minAlign=minAlign)
sd=rbf.Diag
self_dot = rbf.Xsquare
results = np.zeros(2*num_el,dtype=np.float32)
kernel_file = "sertilpMulti2Col.cu"
with open (kernel_file,"r") as CudaFile:
data = CudaFile.read();
#compile module
#module = SourceModule(data,cache_dir='./nvcc_cache',keep=True,no_extern_c=True)
module = SourceModule(data,keep=True,no_extern_c=True,options=["--ptxas-options=-v"])
#get module function
func = module.get_function('rbfSERTILP2multi_class')
#class align to sliceSize
cls_align=sliceSize
cls1_n = count_cls[bin_cls[0]]
align_cls1_n = cls1_n+(cls_align-cls1_n%cls_align)%cls_align
cls2_n = count_cls[bin_cls[1]]
align_cls2_n = cls2_n+(cls_align-cls2_n%cls_align)%cls_align
#block size, power of 2
tpb=sliceSize*threadsPerRow
#grid size, number of blocks
bpg =np.ceil(((align_cls1_n+align_cls2_n)*threadsPerRow+0.0)/(tpb))
bpg=int(bpg)
#get module texture
vecI_tex=module.get_texref('VecI_TexRef')
vecJ_tex=module.get_texref('VecJ_TexRef')
#copy data to tex ref
g_vecI = cuda.to_device(vecI)
vecI_tex.set_address(g_vecI,vecI.nbytes)
g_vecJ = cuda.to_device(vecJ)
vecJ_tex.set_address(g_vecJ,vecJ.nbytes)
texList=[vecI_tex,vecJ_tex]
#copy memory to device
g_val = cuda.to_device(v)
g_col = cuda.to_device(c)
g_r = cuda.to_device(r)
g_slice = cuda.to_device(ss)
g_cls_slice = cuda.to_device(cls_slice)
g_self = cuda.to_device(self_dot)
g_y = cuda.to_device(y_map_bin)
g_out = cuda.to_device(results)
g_num_el = np.int32(num_el)
align = np.ceil( 1.0*sliceSize*threadsPerRow/minAlign)*minAlign
g_align = np.int32(align)
g_i = np.int32(i)
g_j = np.int32(j)
g_i_ds= np.int32(i)
g_j_ds= np.int32(j)
g_cls1N_aligned = np.int32(align_cls1_n)
#gamma, copy to constant memory
(g_gamma,gsize)=module.get_global('GAMMA')
cuda.memcpy_htod(g_gamma, np.float32(gamma) )
g_cls_start = cuda.to_device(start_cls)
g_cls_count = cuda.to_device(count_cls)
g_cls = cuda.to_device(bin_cls)
#start_event = cuda.Event()
#stop_event = cuda.Event()
start_event.record()
func(g_val,
g_col,
g_r,
g_slice,
g_self,
g_y,
g_out,
g_num_el,
g_align,
g_i,
g_j,
g_i_ds,
g_j_ds,
g_cls1N_aligned,
g_cls_start,
g_cls_count,
g_cls,
g_cls_slice,
block=(tpb,1,1),grid=(bpg,1),texrefs=texList)
stop_event.record()
stop_event.synchronize()
cuTime=stop_event.time_since(start_event)
cuda.memcpy_dtoh(results,g_out)
resultsSEllC = np.copy(results)
print "\nSERTILP class time ",cuTime*1e-3
print 'Total sum:',resultsSEllC.sum()
print "Error to CPU:",np.square(resultsSEllC-kij).sum()
print "Error to ELlpack:",np.square(resultsSEllC-resultsEll).sum()
print resultsSEllC[0:1000:skip]
| mit |
ChanderG/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
        Representation of weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
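# Illustrative usage sketch (editorial addition): KernelRidge with an RBF kernel
# on random data.  Only the public API defined above is used; the data shapes and
# the alpha/gamma values are placeholders.
def _kernel_ridge_demo():  # pragma: no cover
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 5)
    y_demo = rng.randn(50)
    model = KernelRidge(kernel="rbf", alpha=1.0, gamma=0.1)
    model.fit(X_demo, y_demo)
    # dual_coef_ holds one coefficient per training sample
    return model.predict(X_demo[:5]), model.dual_coef_.shape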
| bsd-3-clause |
nlandolfi/test-infra-1 | mungegithub/issue-labeler/simple_app.py | 4 | 4784 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
from flask import Flask, request
from sklearn.feature_extraction import FeatureHasher
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
app = Flask(__name__)
#Parameters
team_fn = './models/trained_teams_model.pkl'
component_fn = './models/trained_components_model.pkl'
logFile = '/tmp/issue-labeler.log'
logSize = 1024*1024*100
numFeatures = 262144
myLoss = 'hinge'
myAlpha = .1
myPenalty = 'l2'
myHasher = FeatureHasher(input_type='string', n_features=numFeatures, non_negative=True)
myStemmer = PorterStemmer()
tokenizer = RegexpTokenizer(r'\w+')
stopwords = []
try:
if not stopwords:
stop_fn = './stopwords.txt'
with open(stop_fn, 'r') as fp:
stopwords = list([word.strip() for word in fp])
except: # pylint:disable=bare-except
#don't remove any stopwords
stopwords = []
@app.errorhandler(500)
def internal_error(exception):
return str(exception), 500
@app.route("/", methods=['POST'])
def get_labels():
"""
The request should contain 2 form-urlencoded parameters
1) title : title of the issue
2) body: body of the issue
It returns a team/<label> and a component/<label>
"""
title = request.form.get('title', '')
body = request.form.get('body', '')
tokens = tokenize_stem_stop(" ".join([title, body]))
team_mod = joblib.load(team_fn)
comp_mod = joblib.load(component_fn)
vec = myHasher.transform([tokens])
tlabel = team_mod.predict(vec)[0]
clabel = comp_mod.predict(vec)[0]
return ",".join([tlabel, clabel])
def tokenize_stem_stop(inputString):
inputString = inputString.encode('utf-8')
curTitleBody = tokenizer.tokenize(inputString.decode('utf-8').lower())
return map(myStemmer.stem, filter(lambda x: x not in stopwords, curTitleBody))
@app.route("/update_models", methods=['PUT'])
def update_model():
"""
data should contain three fields
titles: list of titles
bodies: list of bodies
labels: list of list of labels
"""
data = request.json
titles = data.get('titles')
bodies = data.get('bodies')
labels = data.get('labels')
tTokens = []
cTokens = []
team_labels = []
component_labels = []
for (title, body, label_list) in zip(titles, bodies, labels):
tLabel = filter(lambda x: x.startswith('team'), label_list)
cLabel = filter(lambda x: x.startswith('component'), label_list)
tokens = tokenize_stem_stop(" ".join([title, body]))
if tLabel:
team_labels += tLabel
tTokens += [tokens]
if cLabel:
component_labels += cLabel
cTokens += [tokens]
tVec = myHasher.transform(tTokens)
cVec = myHasher.transform(cTokens)
    if team_labels:
        if os.path.isfile(team_fn):
            team_model = joblib.load(team_fn)
            team_model.partial_fit(tVec, np.array(team_labels))
        else:
            #no team model stored so build a new one
            team_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
            team_model.fit(tVec, np.array(team_labels))
        #only persist a model that was actually (re)trained in this request
        joblib.dump(team_model, team_fn)
    if component_labels:
        if os.path.isfile(component_fn):
            component_model = joblib.load(component_fn)
            component_model.partial_fit(cVec, np.array(component_labels))
        else:
            #no comp model stored so build a new one
            component_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
            component_model.fit(cVec, np.array(component_labels))
        #only persist a model that was actually (re)trained in this request
        joblib.dump(component_model, component_fn)
    return ""
def configure_logger():
FORMAT = '%(asctime)-20s %(levelname)-10s %(message)s'
file_handler = RotatingFileHandler(logFile, maxBytes=logSize, backupCount=3)
formatter = logging.Formatter(FORMAT)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
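# Example client call (editorial addition, illustrative only): how another
# process could query the labeler once the service is running.  Assumes the
# `requests` package and Flask's default port; the issue text is made up.
def _example_client_request():  # pragma: no cover
    import requests
    resp = requests.post("http://localhost:5000/",
                         data={"title": "kubelet crashes on restart",
                               "body": "The kubelet panics when the node reboots."})
    # Response body is "<team label>,<component label>"; the actual labels
    # depend on the models stored on disk.
    return resp.text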
if __name__ == "__main__":
configure_logger()
app.run(host="0.0.0.0")
| apache-2.0 |
costypetrisor/scikit-learn | sklearn/preprocessing/tests/test_data.py | 3 | 35967 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
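# ----------------------------------------------------------------------------
# Illustrative usage sketch (an assumption for demonstration, not part of the
# test suite above): inspect the OneHotEncoder attributes exercised by these
# tests.  The sample data below is arbitrary.
if __name__ == "__main__":
    import numpy as _np
    from sklearn.preprocessing import OneHotEncoder as _OneHotEncoder
    _X = _np.array([[3, 2, 1], [0, 1, 1]])
    _enc = _OneHotEncoder()
    _encoded = _enc.fit_transform(_X).toarray()
    # column spans reserved per input feature, and the columns actually seen
    print("feature_indices_:", _enc.feature_indices_)
    print("active_features_:", _enc.active_features_)
    print("encoded:\n", _encoded)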
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
untom/scikit-learn | sklearn/learning_curve.py | 110 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
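        # Each _fit_and_score call returns the train and test scores first
        # (followed by bookkeeping values); the slice below keeps only those
        # two.  Results arrive fold-major (the tick index varies fastest), so
        # they are regrouped into (n_cv_folds, n_unique_ticks, 2) and finally
        # transposed so that out[0]/out[1] are the train/test score matrices.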
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes' (%d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
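# ----------------------------------------------------------------------------
# Illustrative usage sketch (an assumption for demonstration, not part of the
# module above): one possible way to call these helpers.  The dataset,
# estimator and parameter range are arbitrary choices.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    sizes, train_scores, test_scores = learning_curve(
        SVC(kernel="linear"), iris.data, iris.target,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
    print("train sizes:", sizes)
    print("mean test score per size:", test_scores.mean(axis=1))
    gamma_train, gamma_test = validation_curve(
        SVC(), iris.data, iris.target, param_name="gamma",
        param_range=np.logspace(-6, -1, 5), cv=5)
    print("mean test score per gamma:", gamma_test.mean(axis=1))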
| bsd-3-clause |
yipenggao/moose | modules/porous_flow/doc/tests/radialinjection.py | 5 | 4190 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
#
# The two phase radial injection problem has a similarity solution (r^2/t)
#
# Read MOOSE simulation data for constant time (tdata) and constant
# radial distance (rdata)
tdata = np.genfromtxt('../../tests/dirackernels/theis3_line_0016.csv', delimiter = ',', names = True, dtype = float)
rdata = np.genfromtxt('../../tests/dirackernels/theis3.csv', delimiter = ',', names = True, dtype = float)
# Distance where data is sampled as a function of time
r = 4
# Time where data is sampled along the spatial dimension
t = 2e4
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Water pressure vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['ppwater'] * 1e-6, label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['ppwater'] * 1e-6, 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
axes[0].set_xlim([5e-4, 5e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Liquid pressure (MPa)')
axes[0].legend()
# Gas saturation vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['sgas'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['sgas'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
axes[1].set_xlim([5e-4, 5e1])
axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Gas saturation (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_fig.pdf")
#
# The similarity solution (r^2/t) is applicable even when dissolution is included
#
# Read MOOSE simulation data for constant time (tdata) and constant
# radial distance (rdata) using the water-ncg fluid state
tdata = np.genfromtxt('../../tests/fluidstate/theis_csvout_line_0028.csv', delimiter = ',', names = True, dtype = float)
rdata = np.genfromtxt('../../tests/fluidstate/theis_csvout.csv', delimiter = ',', names = True, dtype = float)
# Distance where data is sampled as a function of time
r = 4
# Time where data is sampled along the spatial dimension
t = 1e5
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Gas pressure vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['pgas'] * 1e-6, label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['pgas'] * 1e-6, 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
axes[0].set_xlim([1e-4, 5e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Gas pressure (MPa)')
axes[0].legend()
# Total mass fraction vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['zi'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['zi'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
axes[1].set_xlim([1e-4, 5e1])
axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Total mass fraction (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_waterncg_fig.pdf")
#
# Read MOOSE simulation data for constant time (tdata) and constant
# radial distance (rdata) using the brine-co2 fluid state
tdata = np.genfromtxt('../../tests/fluidstate/theis_brineco2_csvout_line_0028.csv', delimiter = ',', names = True, dtype = float)
rdata = np.genfromtxt('../../tests/fluidstate/theis_brineco2_csvout.csv', delimiter = ',', names = True, dtype = float)
# Distance where data is sampled as a function of time
r = 4
# Time where data is sampled along the spatial dimension
t = 1e5
fig, axes = plt.subplots(1, 2, figsize = (15, 4))
# Gas pressure vs similarity solution
axes[0].plot(tdata['x']**2 / t, tdata['pgas'] * 1e-6, label = 'Fixed $t$')
axes[0].plot(r**2 / rdata['time'], rdata['pgas'] * 1e-6, 'o', label = 'Fixed $r$')
axes[0].set_xscale('log')
axes[0].set_xlim([1e-4, 5e1])
axes[0].set_xlabel('$\zeta = r^2/t$')
axes[0].set_ylabel('Gas pressure (MPa)')
axes[0].legend()
# Total mass fraction vs similarity solution
axes[1].plot(tdata['x']**2 / t, tdata['zi'], label = 'Fixed $t$')
axes[1].plot(r**2 / rdata['time'], rdata['zi'], 'o', label = 'Fixed $r$')
axes[1].set_xscale('log')
axes[1].set_xlim([1e-4, 5e1])
axes[1].set_ylim([-0.1, 1.1])
axes[1].set_xlabel('$\zeta = r^2/t$')
axes[1].set_ylabel('Total mass fraction (-)')
axes[1].legend()
plt.tight_layout()
plt.savefig("theis_similarity_brineco2_fig.pdf")
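# Optional helper sketch (an assumed addition, not in the original script): the
# similarity collapse can be quantified by interpolating the fixed-t profile
# onto the fixed-r zeta values and taking the largest discrepancy.
def similarity_mismatch(zeta_t, values_t, zeta_r, values_r):
    """Return the maximum absolute difference between the two profiles."""
    order = np.argsort(zeta_t)
    interpolated = np.interp(zeta_r, zeta_t[order], values_t[order])
    return np.abs(interpolated - values_r).max()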
| lgpl-2.1 |
chengjunjian/tushare | setup.py | 21 | 2592 | from setuptools import setup, find_packages
import codecs
import os
import tushare
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
long_desc = """
TuShare
===============
.. image:: https://api.travis-ci.org/waditu/tushare.png?branch=master
:target: https://travis-ci.org/waditu/tushare
.. image:: https://badge.fury.io/py/tushare.png
:target: http://badge.fury.io/py/tushare
* easy to use as most of the data returned are pandas DataFrame objects
* can be easily saved as csv, excel or json files
* can be inserted into MySQL or Mongodb
Target Users
--------------
* financial market analyst of China
* learners of financial data analysis with pandas/NumPy
* people who are interested in China financial data
Installation
--------------
pip install tushare
Upgrade
---------------
pip install tushare --upgrade
Quick Start
--------------
::
import tushare as ts
ts.get_hist_data('600848')
return::
open high close low volume p_change ma5 \
date
2012-01-11 6.880 7.380 7.060 6.880 14129.96 2.62 7.060
2012-01-12 7.050 7.100 6.980 6.900 7895.19 -1.13 7.020
2012-01-13 6.950 7.000 6.700 6.690 6611.87 -4.01 6.913
2012-01-16 6.680 6.750 6.510 6.480 2941.63 -2.84 6.813
2012-01-17 6.660 6.880 6.860 6.460 8642.57 5.38 6.822
2012-01-18 7.000 7.300 6.890 6.880 13075.40 0.44 6.788
2012-01-19 6.690 6.950 6.890 6.680 6117.32 0.00 6.770
2012-01-20 6.870 7.080 7.010 6.870 6813.09 1.74 6.832
"""
setup(
name='tushare',
version=tushare.__version__,
    description='A utility for crawling historical and real-time quotes data of China stocks',
    # long_description=read("README.rst"),
long_description = long_desc,
author='Jimmy Liu',
author_email='[email protected]',
license='BSD',
url='http://tushare.org',
keywords='china stock data',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License'],
packages=['tushare','tushare.stock','tushare.data','tushare.util'],
package_data={'': ['*.csv']},
) | bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/IPython/core/display.py | 6 | 34087 | # -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
try:
from base64 import encodebytes as base64_encode
except ImportError:
from base64 import encodestring as base64_encode
import json
import mimetypes
import os
import struct
import sys
import warnings
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'Javascript',
'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data']
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
"""internal implementation of all display_foo methods
Parameters
----------
mimetype : str
The mimetype to be published (e.g. 'image/png')
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
if metadata:
metadata = {mimetype: metadata}
if raw:
# turn list of pngdata into list of { 'image/png': pngdata }
objs = [ {mimetype: obj} for obj in objs ]
display(*objs, raw=raw, metadata=metadata, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def publish_display_data(data, metadata=None, source=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. mime-type keys matching those in data can be used
to specify metadata about particular representations.
source : str, deprecated
Unused.
"""
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.instance().display_pub.publish(
data=data,
metadata=metadata,
)
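# Example (illustrative): publish_display_data({'text/plain': 'hello',
# 'text/html': '<b>hello</b>'}) sends both representations and lets the
# frontend choose which one to render.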
def display(*objs, **kwargs):
"""Display a Python object in all frontends.
By default all representations will be computed and sent to the frontends.
Frontends can decide which representation is used and how.
Parameters
----------
objs : tuple of objects
The Python objects to display.
raw : bool, optional
Are the objects to be displayed already mimetype-keyed dicts of raw display data,
or Python objects that need to be formatted before display? [default: False]
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type strings (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
metadata : dict, optional
A dictionary of metadata to associate with the output.
mime-type keys in this dictionary will be associated with the individual
representation formats, if they exist.
"""
raw = kwargs.get('raw', False)
include = kwargs.get('include')
exclude = kwargs.get('exclude')
metadata = kwargs.get('metadata')
from IPython.core.interactiveshell import InteractiveShell
if not raw:
format = InteractiveShell.instance().display_formatter.format
for obj in objs:
if raw:
publish_display_data(data=obj, metadata=metadata)
else:
format_dict, md_dict = format(obj, include=include, exclude=exclude)
if not format_dict:
# nothing to display (e.g. _ipython_display_ took over)
continue
if metadata:
# kwarg-specified metadata gets precedence
_merge(md_dict, metadata)
publish_display_data(data=format_dict, metadata=md_dict)
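# Example (illustrative): display(obj) sends every computed representation of
# obj to the frontend, while display({'text/plain': 'hi'}, raw=True) publishes
# pre-formatted, mimetype-keyed data unchanged.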
def display_pretty(*objs, **kwargs):
"""Display the pretty (default) representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
"""Display the HTML representation of an object.
    Note: If raw=False and the object does not have an HTML
representation, no HTML will be shown.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw HTML data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/html', objs, **kwargs)
def display_markdown(*objs, **kwargs):
"""Displays the Markdown representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw markdown data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/markdown', objs, **kwargs)
def display_svg(*objs, **kwargs):
"""Display the SVG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw svg data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
"""Display the JPEG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw JPEG data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
"""Display the JSON representation of an object.
Note that not many frontends support displaying JSON.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw json data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
"""Display the Javascript representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
"""Display the PDF representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
"""An object that wraps data to be displayed."""
_read_flags = 'r'
_show_mem_addr = False
def __init__(self, data=None, url=None, filename=None):
"""Create a display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. The MIME type of the data should match the
subclasses used, so the Png subclass should be used for 'image/png'
data. If the data is a URL, the data will first be downloaded
        and then displayed.
Parameters
----------
data : unicode, str or bytes
The raw data or a URL or file to load the data from
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
"""
if data is not None and isinstance(data, string_types):
if data.startswith('http') and url is None:
url = data
filename = None
data = None
elif _safe_exists(data) and filename is None:
url = None
filename = data
data = None
self.data = data
self.url = url
self.filename = None if filename is None else unicode_type(filename)
self.reload()
self._check_data()
def __repr__(self):
if not self._show_mem_addr:
cls = self.__class__
r = "<%s.%s object>" % (cls.__module__, cls.__name__)
else:
r = super(DisplayObject, self).__repr__()
return r
def _check_data(self):
"""Override in subclasses if there's something to check."""
pass
def reload(self):
"""Reload the raw data from file or URL."""
if self.filename is not None:
with open(self.filename, self._read_flags) as f:
self.data = f.read()
elif self.url is not None:
try:
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib2 import urlopen
response = urlopen(self.url)
self.data = response.read()
# extract encoding from header, if there is one:
encoding = None
for sub in response.headers['content-type'].split(';'):
sub = sub.strip()
if sub.startswith('charset'):
encoding = sub.split('=')[-1].strip()
break
# decode data, if an encoding was specified
if encoding:
self.data = self.data.decode(encoding, 'replace')
except:
self.data = None
class TextDisplayObject(DisplayObject):
"""Validate that display data is text"""
def _check_data(self):
if self.data is not None and not isinstance(self.data, string_types):
raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
def _repr_pretty_(self):
return self.data
class HTML(TextDisplayObject):
def _repr_html_(self):
return self.data
def __html__(self):
"""
This method exists to inform other HTML-using modules (e.g. Markupsafe,
htmltag, etc) that this object is HTML and does not need things like
special characters (<>&) escaped.
"""
return self._repr_html_()
class Markdown(TextDisplayObject):
def _repr_markdown_(self):
return self.data
class Math(TextDisplayObject):
def _repr_latex_(self):
s = self.data.strip('$')
return "$$%s$$" % s
class Latex(TextDisplayObject):
def _repr_latex_(self):
return self.data
class SVG(DisplayObject):
_read_flags = 'rb'
# wrap data in a property, which extracts the <svg> tag, discarding
# document headers
_data = None
@property
def data(self):
return self._data
@data.setter
def data(self, svg):
if svg is None:
self._data = None
return
# parse into dom object
from xml.dom import minidom
svg = cast_bytes_py2(svg)
x = minidom.parseString(svg)
# get svg tag (should be 1)
found_svg = x.getElementsByTagName('svg')
if found_svg:
svg = found_svg[0].toxml()
else:
# fallback on the input, trust the user
# but this is probably an error.
pass
svg = cast_unicode(svg)
self._data = svg
def _repr_svg_(self):
return self.data
class JSON(DisplayObject):
"""JSON expects a JSON-able dict or list
not an already-serialized JSON string.
Scalar types (None, number, string) are not allowed, only dict or list containers.
"""
# wrap data in a property, which warns about passing already-serialized JSON
_data = None
def _check_data(self):
if self.data is not None and not isinstance(self.data, (dict, list)):
raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
@property
def data(self):
return self._data
@data.setter
def data(self, data):
if isinstance(data, string_types):
warnings.warn("JSON expects JSONable dict or list, not JSON strings")
data = json.loads(data)
self._data = data
def _repr_json_(self):
return self.data
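# Example (illustrative): JSON({"a": [1, 2]}) keeps the container as-is, while
# JSON('{"a": [1, 2]}') warns and parses the string back into a dict first.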
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
"""Create a Javascript display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. If the data is a URL, the data will first be
downloaded and then displayed.
In the Notebook, the containing element will be available as `element`,
and jQuery will be available. Content appended to `element` will be
visible in the output area.
Parameters
----------
data : unicode, str or bytes
The Javascript source code or a URL to download it from.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
lib : list or str
A sequence of Javascript library URLs to load asynchronously before
running the source code. The full URLs of the libraries should
be given. A single Javascript library URL can also be given as a
string.
        css : list or str
A sequence of css files to load before running the source code.
The full URLs of the css files should be given. A single css URL
can also be given as a string.
"""
if isinstance(lib, string_types):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, string_types):
css = [css]
elif css is None:
css = []
if not isinstance(lib, (list,tuple)):
raise TypeError('expected sequence, got: %r' % lib)
if not isinstance(css, (list,tuple)):
raise TypeError('expected sequence, got: %r' % css)
self.lib = lib
self.css = css
super(Javascript, self).__init__(data=data, url=url, filename=filename)
def _repr_javascript_(self):
r = ''
for c in self.css:
r += css_t % c
for l in self.lib:
r += lib_t1 % l
r += self.data
r += lib_t2*len(self.lib)
return r
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
class Image(DisplayObject):
_read_flags = 'rb'
_FMT_JPEG = u'jpeg'
_FMT_PNG = u'png'
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
def __init__(self, data=None, url=None, filename=None, format=None,
embed=None, width=None, height=None, retina=False,
unconfined=False, metadata=None):
"""Create a PNG/JPEG image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL is
            given, the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
Note that QtConsole is not able to display images if `embed` is set to `False`
width : int
Width in pixels to which to constrain the image in html
height : int
Height in pixels to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
unconfined: bool
Set unconfined=True to disable max-width confinement of the image.
metadata: dict
Specify extra metadata to attach to the image.
Examples
--------
# embedded image data, works in qtconsole and notebook
# when passed positionally, the first arg can be any of raw image data,
# a URL, or a filename from which to load image data.
# The result is always embedding image data for inline images.
Image('http://www.google.fr/images/srpr/logo3w.png')
Image('/path/to/image.jpg')
Image(b'RAW_PNG_DATA...')
# Specifying Image(url=...) does not embed the image data,
# it only generates `<img>` tag with a link to the source.
# This will not work in the qtconsole or offline.
Image(url='http://www.google.fr/images/srpr/logo3w.png')
"""
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, string_types) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if format is None:
if ext is not None:
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
if ext == u'png':
format = self._FMT_PNG
else:
format = ext.lower()
elif isinstance(data, bytes):
# infer image type from image data header,
# only if format has not been specified.
if data[:2] == _JPEG:
format = self._FMT_JPEG
# failed to detect format, default png
if format is None:
format = 'png'
if format.lower() == 'jpg':
# jpg->jpeg
format = self._FMT_JPEG
self.format = unicode_type(format).lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
self.width = width
self.height = height
self.retina = retina
self.unconfined = unconfined
self.metadata = metadata
super(Image, self).__init__(data=data, url=url, filename=filename)
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == 'png':
w, h = _pngxy(self.data)
elif self.format == 'jpeg':
w, h = _jpegxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = klass = ''
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
if self.unconfined:
klass = ' class="unconfined"'
return u'<img src="{url}"{width}{height}{klass}/>'.format(
url=self.url,
width=width,
height=height,
klass=klass,
)
def _data_and_metadata(self):
"""shortcut for returning metadata with shape information, if defined"""
md = {}
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if self.unconfined:
md['unconfined'] = self.unconfined
if self.metadata:
md.update(self.metadata)
if md:
return self.data, md
else:
return self.data
def _repr_png_(self):
if self.embed and self.format == u'png':
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
return self._data_and_metadata()
def _find_ext(self, s):
return unicode_type(s.split('.')[-1].lower())
class Video(DisplayObject):
def __init__(self, data=None, url=None, filename=None, embed=False, mimetype=None):
"""Create a video object given raw data or an URL.
When this object is returned by an input cell or passed to the
display function, it will result in the video being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw video data or a URL or filename to load the data from.
Raw data will require passing `embed=True`.
url : unicode
A URL for the video. If you specify `url=`,
            the video data will not be embedded.
filename : unicode
Path to a local file containing the video.
Will be interpreted as a local URL unless `embed=True`.
embed : bool
Should the video be embedded using a data URI (True) or be
loaded using a <video> tag (False).
Since videos are large, embedding them should be avoided, if possible.
You must confirm embedding as your intention by passing `embed=True`.
Local files can be displayed with URLs without embedding the content, via::
Video('./video.mp4')
mimetype: unicode
Specify the mimetype for embedded videos.
Default will be guessed from file extension, if available.
Examples
--------
Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
Video('path/to/video.mp4')
Video('path/to/video.mp4', embed=True)
Video(b'raw-videodata', embed=True)
"""
if url is None and isinstance(data, string_types) and data.startswith(('http:', 'https:')):
url = data
data = None
        elif data is not None and os.path.exists(data):
filename = data
data = None
if data and not embed:
msg = ''.join([
"To embed videos, you must pass embed=True ",
"(this may make your notebook files huge)\n",
"Consider passing Video(url='...')",
])
raise ValueError(msg)
self.mimetype = mimetype
self.embed = embed
super(Video, self).__init__(data=data, url=url, filename=filename)
def _repr_html_(self):
# External URLs and potentially local files are not embedded into the
# notebook output.
if not self.embed:
url = self.url if self.url is not None else self.filename
output = """<video src="{0}" controls>
Your browser does not support the <code>video</code> element.
</video>""".format(url)
return output
# Embedded videos are base64-encoded.
mimetype = self.mimetype
if self.filename is not None:
if not mimetype:
mimetype, _ = mimetypes.guess_type(self.filename)
with open(self.filename, 'rb') as f:
video = f.read()
else:
video = self.data
if isinstance(video, unicode_type):
# unicode input is already b64-encoded
b64_video = video
else:
b64_video = base64_encode(video).decode('ascii').rstrip()
output = """<video controls>
<source src="data:{0};base64,{1}" type="{0}">
Your browser does not support the video tag.
</video>""".format(mimetype, b64_video)
return output
def reload(self):
# TODO
pass
def _repr_png_(self):
# TODO
pass
def _repr_jpeg_(self):
# TODO
pass
def clear_output(wait=False):
"""Clear the output of the current cell receiving output.
Parameters
----------
wait : bool [default: false]
Wait to clear the output until new output is available to replace it."""
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
InteractiveShell.instance().display_pub.clear_output(wait)
else:
print('\033[2K\r', end='')
sys.stdout.flush()
print('\033[2K\r', end='')
sys.stderr.flush()
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
"""Select figure formats for the inline backend. Optionally pass quality for JPEG.
For example, this enables PNG and JPEG output with a JPEG quality of 90%::
In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
To set this in your config files use the following::
c.InlineBackend.figure_formats = {'png', 'jpeg'}
c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
Parameters
----------
*formats : strs
One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs :
Keyword args will be relayed to ``figure.canvas.print_figure``.
"""
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import select_figure_formats
# build kwargs, starting with InlineBackend config
kw = {}
from ipykernel.pylab.config import InlineBackend
cfg = InlineBackend.instance()
kw.update(cfg.print_figure_kwargs)
kw.update(**kwargs)
shell = InteractiveShell.instance()
select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
"""Set whether the inline backend closes all figures automatically or not.
By default, the inline backend used in the IPython Notebook will close all
matplotlib figures automatically after each cell is run. This means that
plots in different cells won't interfere. Sometimes, you may want to make
a plot in one cell and then refine it in later cells. This can be accomplished
by::
In [1]: set_matplotlib_close(False)
To set this in your config files use the following::
c.InlineBackend.close_figures = False
Parameters
----------
close : bool
Should all matplotlib figures be automatically closed after each cell is
run?
"""
from ipykernel.pylab.config import InlineBackend
cfg = InlineBackend.instance()
cfg.close_figures = close
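# ----------------------------------------------------------------------------
# Illustrative usage sketch (an assumption for demonstration, not part of the
# module above): the display objects can be inspected outside a frontend
# through their _repr_*_ methods.  The sample values are arbitrary.
if __name__ == "__main__":
    print(HTML("<b>bold</b>")._repr_html_())     # <b>bold</b>
    print(Math(r"\frac{1}{2}")._repr_latex_())   # $$\frac{1}{2}$$
    print(JSON({"a": [1, 2, 3]})._repr_json_())  # {'a': [1, 2, 3]}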
| apache-2.0 |
nelango/ViralityAnalysis | model/lib/sklearn/utils/estimator_checks.py | 6 | 55016 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import NMF, ProjectedGradientNMF
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
# Estimators with deprecated transform methods. Should be removed in 0.19 when
# _LearntSelectorMixin is removed.
DEPRECATED_TRANSFORM = [
"RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
"ExtraTreesRegressor", "DecisionTreeClassifier",
"DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
"LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
"LogisticRegression", "LogisticRegressionCV",
"GradientBoostingClassifier", "GradientBoostingRegressor"]
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
if name not in DEPRECATED_TRANSFORM:
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
    name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
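# Example (illustrative): check_estimator(LogisticRegression) would run the
# full battery of yielded checks against that class and raise on the first
# failing check.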
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
if isinstance(estimator, NMF):
if not isinstance(estimator, ProjectedGradientNMF):
estimator.set_params(solver='cd')
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
if name in DEPRECATED_TRANSFORM:
funcs = ["score"]
else:
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
if name in DEPRECATED_TRANSFORM:
funcs = ["fit", "score", "partial_fit", "fit_predict"]
else:
funcs = [
"fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
if name in DEPRECATED_TRANSFORM:
methods = ["predict", "decision_function", "predict_proba"]
else:
methods = [
"predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
if name in DEPRECATED_TRANSFORM:
check_methods = ["predict", "decision_function", "predict_proba"]
else:
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that a ValueError is raised if the number of features
    # changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one scheme of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    # check that regressors raise a deprecation warning when
    # decision_function, predict_proba or predict_log_proba are called
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small to reach convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
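# Sketch of the convention checked above (illustrative, not part of the
# module): __init__ should do nothing but store its arguments unchanged, so
# that get_params()/set_params() round-trip cleanly; validation belongs in fit.
#
#   class MyEstimator(BaseEstimator):
#       def __init__(self, alpha=1.0):
#           self.alpha = alpha  # stored as-is, validated later in fit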
def multioutput_estimator_convert_y_2d(name, y):
    # MultiTask estimators raise a ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
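# Illustrative note on the invariant above: for a nested estimator such as
# Pipeline([('clf', T())]), get_params(deep=False) returns only the top-level
# parameters (e.g. 'steps'), while get_params(deep=True) additionally contains
# the sub-estimator entries (e.g. 'clf' and 'clf__<param>'), so the shallow
# items must always be a subset of the deep items.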
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
| mit |
darioizzo/pykep | pykep/trajopt/_mr_lt_nep.py | 2 | 7937 | import pykep as pk
class mr_lt_nep:
"""
This class represents, as a global optimization problem (linearly constrained,
high dimensional), a Multiple Rendezvous trajectory of a low-thrust spacecraft equipped
with a nuclear electric propulsion engine.
- Izzo, D. et al., GTOC7 - Solution Returned by the ACT/ISAS team
The decision vector (chromosome) is:
[t1, tof1, rest1, m_f1] + [throttles1] +
[t2, tof2, rest2, m_f2] + [throttles2] + ....
.... + [total_tof]
where the units are [mjd2000, days, days, kg] + [n/a] + .... + [days]
.. note::
The resulting problem is non linearly constrained. The resulting trajectory is not time-bounded.
"""
def __init__(
self,
seq=[pk.planet.gtoc7(3413), pk.planet.gtoc7(
234), pk.planet.gtoc7(11432)],
n_seg=5,
t0=[13000, 13200],
leg_tof=[1, 365.25 * 3],
rest=[30., 365.25],
mass=[800, 2000],
Tmax=0.3,
Isp=3000.,
traj_tof=365.25 * 6,
objective='mass',
c_tol=1e-05
):
"""
prob = mr_lt_nep(seq=[pykep.gtoc7(3413),pykep.gtoc7(234), pykep.gtoc7(11432)], n_seg=5, t0=[13000, 13200],
leg_tof=[1, 365.25 * 3], rest=[30., 365.25], mass=[800, 2000], Tmax=0.3,
Isp=3000., traj_tof=365.25 * 6, objective='mass', c_tol=1e-05)
* seq: list of pykep.planet defining the encounter sequence for the trajectory (including the initial planet)
        * n_seg: number of segments to be used for each leg (a single integer, applied to every leg)
* t0: list of two pykep epochs defining the launch window
* leg_tof: list of two floats defining the minimum and maximum time of each leg (days)
* rest: list of two floats defining the minimum and maximum time the spacecraft can rest at one planet (days)
* mass: list of two floats defining the minimum final spacecraft mass and the starting spacecraft mass (kg)
* Tmax: maximum thrust (N)
* Isp: engine specific impulse (sec)
* traj_tof maximum total mission duration (days)
* c_tol: tolerance on the constraints
"""
# Number of legs
n = len(seq) - 1
# Problem dimension
dim = (4 + n_seg * 3) * n + 1
# Number of equality constraints
self.__dim_eq = 7 * n
# Number of Inequality constraints
self.__dim_ineq = n * n_seg + n
# We define data members
self.__seq = seq
self.__num_legs = n
self.__nseg = n_seg
self.__dim_leg = 4 + n_seg * 3
self.__start_mass = mass[1]
self.__max_total_time = traj_tof
self.__t0 = t0
self.__leg_tof = leg_tof
self.__rest = rest
self.__mass = mass
# We create n distinct legs objects
self.__legs = []
for i in range(n):
self.__legs.append(pk.sims_flanagan.leg())
for leg in self.__legs:
leg.high_fidelity = True
leg.set_mu(pk.MU_SUN)
if objective not in ['mass', 'time']:
raise ValueError(
"Error in defining the objective. Was it one of mass or time?")
self.__objective = objective
def fitness(self, x_full):
retval = []
# 1 - obj fun
if self.__objective == 'mass':
retval.append(x_full[-1 - self.__dim_leg + 3])
elif self.__objective == 'time':
retval.append(x_full[-1 - self.__dim_leg] - x_full[0])
sc_mass = self.__start_mass
eqs = []
ineqs = []
# 2 - constraints
for i in range(self.__num_legs):
x = x_full[i * self.__dim_leg:(i + 1) * self.__dim_leg]
start = pk.epoch(x[0])
end = pk.epoch(x[0] + x[1])
            # Computing starting spacecraft state
r, v = self.__seq[i].eph(start)
x0 = pk.sims_flanagan.sc_state(r, v, sc_mass)
            # Computing ending spacecraft state
r, v = self.__seq[i + 1].eph(end)
xe = pk.sims_flanagan.sc_state(r, v, x[3])
# Building the SF leg
self.__legs[i].set_spacecraft(
pk.sims_flanagan.spacecraft(sc_mass, .3, 3000.))
self.__legs[i].set(start, x0, x[-3 * self.__nseg:], end, xe)
# Setting all constraints
eqs.extend(self.__legs[i].mismatch_constraints())
ineqs.extend(self.__legs[i].throttles_constraints())
eqs[-7] /= pk.AU
eqs[-6] /= pk.AU
eqs[-5] /= pk.AU
eqs[-4] /= pk.EARTH_VELOCITY
eqs[-3] /= pk.EARTH_VELOCITY
eqs[-2] /= pk.EARTH_VELOCITY
eqs[-1] /= self.__start_mass
sc_mass = x[3] # update mass to final mass of leg
if i < self.__num_legs - 1:
x_next = x_full[
(i + 1) * self.__dim_leg:(i + 2) * self.__dim_leg]
time_ineq = x[0] + x[1] + x[2] - x_next[0]
ineqs.append(time_ineq / 365.25)
else:
final_time_ineq = x[0] + x[1] + x[2] - \
x_full[0] - x_full[-1] # <- total time
ineqs.append(final_time_ineq / 365.25)
retval = retval + eqs + ineqs
return retval
def get_bounds(self):
t0 = self.__t0
leg_tof = self.__leg_tof
rest = self.__rest
mass = self.__mass
nseg = self.__nseg
traj_tof = self.__max_total_time
n = self.__num_legs
# We set the problem box-bounds
# set leg bounds
lb_leg = [t0[0], leg_tof[0], rest[0], mass[0]] + [-1] * nseg * 3
ub_leg = [t0[1] + traj_tof * n, leg_tof[1],
rest[1], mass[1]] + [1] * nseg * 3
# set n leg bounds
lb = lb_leg * n
ub = [t0[1], leg_tof[1], rest[1], mass[1]] + \
[1] * nseg * 3 + ub_leg * (n - 1)
# set total time bounds
lb += [1.]
ub += [self.__max_total_time]
return (lb, ub)
def get_nic(self):
return self.__dim_ineq
def get_nec(self):
return self.__dim_eq
def resting_times(self, x):
return list(x[2::self.__dim_leg])
def plot(self, x, ax=None):
"""
ax = prob.plot(x, ax=None)
- x: encoded trajectory
- ax: matplotlib axis where to plot. If None figure and axis will be created
- [out] ax: matplotlib axis where to plot
Plots the trajectory represented by a decision vector x on the 3d axis ax
Example::
ax = prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from pykep import epoch, AU
from pykep.sims_flanagan import sc_state
from pykep.orbit_plots import plot_planet, plot_sf_leg
# Creating the axis if necessary
if ax is None:
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
# Plotting the Sun ........
axis.scatter([0], [0], [0], color='y')
# Plotting the pykep.planet both at departure and arrival dates
for i in range(self.__num_legs):
idx = i * self.__dim_leg
plot_planet(self.__seq[i], epoch(x[idx]), units=AU, legend=True, color=(
0.7, 0.7, 0.7), s=30, axes=axis)
plot_planet(self.__seq[i + 1], epoch(x[idx] + x[idx + 1]),
units=AU, legend=False, color=(0.7, 0.7, 0.7), s=30, axes=axis)
# Computing the legs
self.fitness(x)
# Plotting the legs
for leg in self.__legs:
plot_sf_leg(leg, units=AU, N=10, axes=axis, legend=False)
return axis
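# Minimal evaluation sketch (an assumption-laden example, not part of the
# original module): it requires a pykep installation that ships the GTOC7
# planet data used by the default constructor.  With the defaults
# (3 planets -> 2 legs, n_seg=5) the chromosome has (4 + 5 * 3) * 2 + 1 = 39
# entries, and a random chromosome drawn inside the box bounds can be scored
# directly with the fitness() method defined above.
#
#   import numpy as np
#   udp = mr_lt_nep()
#   lb, ub = udp.get_bounds()
#   x = np.random.uniform(lb, ub)
#   print(udp.fitness(x))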
| gpl-3.0 |
eickenberg/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
tethysplatform/tethys | tethys_gizmos/gizmo_options/plotly_view.py | 2 | 2794 | # coding=utf-8
import plotly.offline as opy
from .base import TethysGizmoOptions
__all__ = ['PlotlyView']
class PlotlyView(TethysGizmoOptions):
"""
Simple options object for plotly view.
.. note:: Information about the Plotly API can be found at https://plot.ly/python.
Attributes:
plot_input(plotly graph_objs): A plotly graph_objs to be plotted.
height(Optional[str]): Height of the plot element. Any valid css unit of length.
width(Optional[str]): Width of the plot element. Any valid css unit of length.
attributes(Optional[dict]): Dictionary of attributed to add to the outer div.
classes(Optional[str]): Space separated string of classes to add to the outer div.
hidden(Optional[bool]): If True, the plot will be hidden. Default is False.
show_link(Optional[bool]): If True, the link to export plot to view in plotly is shown. Default is False.
Controller Code Basic Example::
from datetime import datetime
import plotly.graph_objs as go
from tethys_sdk.gizmos import PlotlyView
        x = [datetime(year=2013, month=10, day=4),
             datetime(year=2013, month=11, day=5),
             datetime(year=2013, month=12, day=6)]
my_plotly_view = PlotlyView([go.Scatter(x=x, y=[1, 3, 6])])
context = {'plotly_view_input': my_plotly_view}
Controller Code Pandas Example::
import numpy as np
import pandas as pd
from tethys_sdk.gizmos import PlotlyView
df = pd.DataFrame(np.random.randn(1000, 2), columns=['A', 'B']).cumsum()
my_plotly_view = PlotlyView(df.iplot(asFigure=True))
context = {'plotly_view_input': my_plotly_view}
Template Code::
{% load tethys_gizmos %}
{% gizmo plotly_view_input %}
"""
gizmo_name = "plotly_view"
def __init__(self, plot_input, height='520px', width='100%',
attributes='', classes='', divid='', hidden=False,
show_link=False):
"""
Constructor
"""
# Initialize the super class
super().__init__()
self.plotly_div = opy.plot(plot_input,
auto_open=False,
output_type='div',
include_plotlyjs=False,
show_link=show_link)
self.height = height
self.width = width
self.attributes = attributes
self.classes = classes
self.divid = divid
self.hidden = hidden
@staticmethod
def get_vendor_js():
"""
JavaScript vendor libraries to be placed in the
{% block global_scripts %} block
"""
return ('://plotly-load_from_python.js',)
| bsd-2-clause |
supriyantomaftuh/innstereo | innstereo/rotation_dialog.py | 1 | 22040 | #!/usr/bin/python3
"""
This module stores the RotationDialog class which controls the rotation dialog.
The module contains only the RotationDialog class. It controls the behaviour
of the data-rotation dialog.
"""
from gi.repository import Gtk
from matplotlib.figure import Figure
from matplotlib.gridspec import GridSpec
from matplotlib.backends.backend_gtk3cairo import (FigureCanvasGTK3Cairo
as FigureCanvas)
import numpy as np
import mplstereonet
import os
class RotationDialog(object):
"""
This class controls the appearance and signals of the data-rotation dialog.
This class pulls the rotation dialog from the Glade file, intilizes the
widgets and has methods for the signals defined in Glade.
"""
def __init__(self, main_window, settings, data, add_layer_dataset, add_feature, redraw_main):
"""
Initializes the RotationDialog class.
Requires the main_window object, the settings object (PlotSettings
class) and the data rows to initialize. All the necessary widgets are
loaded from the Glade file. A matplotlib figure is set up and added
to the scrolledwindow. Two axes are set up that show the original and
rotated data.
"""
self.builder = Gtk.Builder()
script_dir = os.path.dirname(__file__)
rel_path = "gui_layout.glade"
abs_path = os.path.join(script_dir, rel_path)
self.builder.add_objects_from_file(abs_path,
("dialog_rotation", "adjustment_rotation_dipdir",
"adjustment_rotation_dip", "adjustment_rotation_angle"))
self.dialog = self.builder.get_object("dialog_rotation")
self.dialog.set_transient_for(main_window)
self.settings = settings
self.data = data
self.trans = self.settings.get_transform()
self.add_layer_dataset = add_layer_dataset
self.add_feature = add_feature
self.redraw_main = redraw_main
self.adjustment_rotation_dipdir = self.builder.get_object("adjustment_rotation_dipdir")
self.adjustment_rotation_dip = self.builder.get_object("adjustment_rotation_dip")
self.adjustment_rotation_angle = self.builder.get_object("adjustment_rotation_angle")
self.spinbutton_rotation_dipdir = self.builder.get_object("spinbutton_rotation_dipdir")
self.spinbutton_rotation_dip = self.builder.get_object("spinbutton_rotation_dip")
self.spinbutton_rotation_angle = self.builder.get_object("spinbutton_rotation_angle")
self.scrolledwindow_rotate = self.builder.get_object("scrolledwindow_rotate")
self.fig = Figure(dpi=self.settings.get_pixel_density())
self.canvas = FigureCanvas(self.fig)
self.scrolledwindow_rotate.add_with_viewport(self.canvas)
gridspec = GridSpec(1, 2)
original_sp = gridspec.new_subplotspec((0, 0),
rowspan=1, colspan=1)
rotated_sp = gridspec.new_subplotspec((0, 1),
rowspan=1, colspan=1)
self.original_ax = self.fig.add_subplot(original_sp,
projection=self.settings.get_projection())
self.rotated_ax = self.fig.add_subplot(rotated_sp,
projection=self.settings.get_projection())
self.canvas.draw()
self.redraw_plot()
self.dialog.show_all()
self.builder.connect_signals(self)
def run(self):
"""
Runs the dialog.
Called from the MainWindow class. Initializes and shows the dialog.
"""
self.dialog.run()
def on_dialog_rotation_destroy(self, widget):
"""
Hides the dialog on destroy.
When the dialog is destroyed it is hidden.
"""
self.dialog.hide()
def on_button_cancel_rotation_clicked(self, button):
"""
Exits the rotation dialog and makes no changes to the project.
When the user clicks on Cancel the dialog is hidden, and no changes
are made to the project structure.
"""
self.dialog.hide()
def on_button_apply_rotate_clicked(self, button):
"""
Adds the rotated layers to the project.
When the user clicks on "apply the rotation", the rotated data is
added to the project as new datasets.
"""
raxis_dipdir = self.spinbutton_rotation_dipdir.get_value()
raxis_dip = self.spinbutton_rotation_dip.get_value()
raxis = [raxis_dipdir, raxis_dip]
raxis_angle = self.spinbutton_rotation_angle.get_value()
for lyr_obj in self.data:
lyr_type = lyr_obj.get_layer_type()
lyr_store = lyr_obj.get_data_treestore()
if lyr_type == "plane":
dipdir_org, dips_org, dipdir_lst, dips_lst, strat, dipdir_az = \
self.parse_plane(lyr_store, raxis, raxis_angle)
store, new_lyr_obj = self.add_layer_dataset("plane")
for dipdir, dip, strt in zip(dipdir_az, dips_lst, strat):
self.add_feature("plane", store, dipdir, dip, strt)
elif lyr_type == "line":
ldipdir_org, ldips_org, ldipdir_lst, ldips_lst, sense = \
self.parse_line(lyr_store, raxis, raxis_angle)
store, new_lyr_obj = self.add_layer_dataset("line")
for dipdir, dip, sns in zip(ldipdir_lst, ldips_lst, sense):
self.add_feature("line", store, dipdir, dip, sns)
elif lyr_type == "smallcircle":
ldipdir_org, ldips_org, ldipdir_lst, ldips_lst, angle = \
self.parse_line(lyr_store, raxis, raxis_angle)
store, new_lyr_obj = self.add_layer_dataset("smallcircle")
for dipdir, dip, ang in zip(ldipdir_lst, ldips_lst, angle):
self.add_feature("smallcircle", store, dipdir, dip, ang)
elif lyr_type == "faultplane":
rtrn = self.parse_faultplane(lyr_store, raxis, raxis_angle)
dipdir_org, dips_org, dipdir_lst, dips_lst, ldipdir_org, \
ldips_org, ldipdir_lst, ldips_lst, sense, dipdir_az = rtrn[0], \
rtrn[1], rtrn[2], rtrn[3], rtrn[4], rtrn[5], rtrn[6], rtrn[7], \
rtrn[8], rtrn[9]
store, new_lyr_obj = self.add_layer_dataset("faultplane")
for dipdir, dip, ldipdir, ldip, sns in zip(dipdir_az, dips_lst,
ldipdir_lst, ldips_lst, sense):
self.add_feature("faultplane", store, dipdir, dip, ldipdir, ldip, sns)
new_lyr_obj.set_properties(lyr_obj.get_properties())
self.dialog.hide()
self.redraw_main()
def on_spinbutton_rotation_dipdir_value_changed(self, spinbutton):
"""
Redraws the plot.
When the value of the spinbutton is changed, the redraw_plot method
is called, which rotates the data according to the new setting.
"""
self.redraw_plot()
def on_spinbutton_rotation_dip_value_changed(self, spinbutton):
"""
Redraws the plot.
When the value of the spinbutton is changed, the redraw_plot method
is called, which rotates the data according to the new setting.
"""
self.redraw_plot()
def on_spinbutton_rotation_angle_value_changed(self, spinbutton):
"""
Redraws the plot.
When the value of the spinbutton is changed, the redraw_plot method
is called, which rotates the data according to the new setting.
"""
self.redraw_plot()
def convert_lonlat_to_dipdir(self, lon, lat):
"""
Converts lat-lon data to dip-direction and dip.
Expects a longitude and a latitude value. The measurment is forward
transformed into stereonet-space. Then the azimut (dip-direction) and
diping angle are calculated. Returns two values: dip-direction and dip.
"""
#The longitude and latitude have to be forward-transformed to get
#the corect azimuth angle
xy = np.array([[lon, lat]])
xy_trans = self.trans.transform(xy)
x = float(xy_trans[0,0:1])
y = float(xy_trans[0,1:2])
alpha = np.arctan2(x, y)
alpha_deg = np.degrees(alpha)
if alpha_deg < 0:
alpha_deg += 360
#Longitude and Latitude don't need to be converted for rotation.
#The correct dip is the array[1] value once the vector has been
#rotated in north-south position.
array = mplstereonet.stereonet_math._rotate(np.degrees(lon),
np.degrees(lat),
alpha_deg * (-1))
gamma = float(array[1])
gamma_deg = 90 - np.degrees(gamma)
        #If the longitude is larger or smaller than pi/2 the measurement lies
#on the upper hemisphere and needs to be corrected.
if lon > (np.pi / 2) or lon < (-np.pi / 2):
alpha_deg = alpha_deg + 180
return alpha_deg, gamma_deg
def rotate_data(self, raxis, raxis_angle, dipdir, dip):
"""
        Rotates a measurement around a rotation axis by a set number of degrees.
        Expects a rotation-axis, a rotation-angle, a dip-direction and a
        dip angle. The measurement is converted to lat-lon and then passed
        to the mplstereonet rotate function.
"""
lonlat = mplstereonet.line(dip, dipdir)
#Rotation around x-axis until rotation-axis azimuth is east-west
rot1 = (90 - raxis[0])
lon1 = np.degrees(lonlat[0])
lat1 = np.degrees(lonlat[1])
lon_rot1, lat_rot1 = mplstereonet.stereonet_math._rotate(lon1, lat1,
theta=rot1, axis="x")
#Rotation around z-axis until rotation-axis dip is east-west
rot2 = -(90 - raxis[1])
lon2 = np.degrees(lon_rot1)
lat2 = np.degrees(lat_rot1)
lon_rot2, lat_rot2 = mplstereonet.stereonet_math._rotate(lon2, lat2,
theta=rot2, axis="z")
#Rotate around the x-axis for the specified rotation:
rot3 = raxis_angle
lon3 = np.degrees(lon_rot2)
lat3 = np.degrees(lat_rot2)
lon_rot3, lat_rot3 = mplstereonet.stereonet_math._rotate(lon3, lat3,
theta=rot3, axis="x")
#Undo the z-axis rotation
rot4 = -rot2
lon4 = np.degrees(lon_rot3)
lat4 = np.degrees(lat_rot3)
lon_rot4, lat_rot4 = mplstereonet.stereonet_math._rotate(lon4, lat4,
theta=rot4, axis="z")
#Undo the x-axis rotation
rot5 = -rot1
lon5 = np.degrees(lon_rot4)
lat5 = np.degrees(lat_rot4)
lon_rot5, lat_rot5 = mplstereonet.stereonet_math._rotate(lon5, lat5,
theta=rot5, axis="x")
dipdir5, dip5 = self.convert_lonlat_to_dipdir(lon_rot5, lat_rot5)
return dipdir5, dip5
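    # Illustrative sanity check (a sketch, not part of the original dialog;
    # "dlg" stands for an already constructed RotationDialog instance):
    # rotating a lineation about an axis that coincides with the lineation
    # itself should leave the orientation essentially unchanged, e.g.
    #
    #   dipdir, dip = dlg.rotate_data([120, 30], 45, 120, 30)
    #   # expected: dipdir close to 120, dip close to 30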
def parse_plane(self, lyr_store, raxis, raxis_angle):
"""
Parses and rotates data of a plane layer.
Expects a TreeStore of a layer, the rotation axis and the
angle of rotation. The method returns each column unrotated and rotated.
"""
dipdir_org = []
dips_org = []
dipdir_lst = []
dips_lst = []
dipdir_az = []
strat = []
for row in lyr_store:
dipdir_org.append(row[0] - 90)
dips_org.append(row[1])
#Planes and faultplanes are rotated using their poles
dipdir, dip = self.rotate_data(raxis, raxis_angle, row[0] + 180,
90 - row[1])
dipdir_lst.append(dipdir + 90)
dipdir_az.append(dipdir + 180)
dips_lst.append(90 - dip)
strat.append(row[2])
return dipdir_org, dips_org, dipdir_lst, dips_lst, strat, dipdir_az
def parse_line(self, lyr_store, raxis, raxis_angle):
"""
Parses and rotates data of a linear or smallcircle layer.
Expects a TreeStore of a layer, the rotation axis and the
angle of rotation. The method returns each column unrotated and rotated.
"""
ldipdir_org = []
ldips_org = []
ldipdir_lst = []
ldips_lst = []
third_col = []
for row in lyr_store:
ldipdir_org.append(row[0])
ldips_org.append(row[1])
ldipdir, ldip = self.rotate_data(raxis, raxis_angle, row[0], row[1])
ldipdir_lst.append(ldipdir)
ldips_lst.append(ldip)
third_col.append(row[2])
return ldipdir_org, ldips_org, ldipdir_lst, ldips_lst, third_col
def parse_faultplane(self, lyr_store, raxis, raxis_angle):
"""
Parses and rotates data of a faultplane layer.
Expects a TreeStore of a faultplane layer, the rotation axis and the
angle of rotation. The method returns each column unrotated and rotated.
"""
dipdir_org = []
dips_org = []
dipdir_lst = []
dips_lst = []
ldipdir_org = []
ldips_org = []
ldipdir_lst = []
ldips_lst = []
dipdir_az = []
sense = []
for row in lyr_store:
dipdir_org.append(row[0] - 90)
dips_org.append(row[1])
#Planes and faultplanes are rotated using their poles
dipdir, dip = self.rotate_data(raxis, raxis_angle, row[0] + 180,
90 - row[1])
dipdir_lst.append(dipdir + 90)
dipdir_az.append(dipdir + 270)
dips_lst.append(90 - dip)
ldipdir_org.append(row[2])
ldips_org.append(row[3])
ldipdir, ldip = self.rotate_data(raxis, raxis_angle, row[2], row[3])
ldipdir_lst.append(ldipdir)
ldips_lst.append(ldip)
sense.append(row[4])
return (dipdir_org, dips_org, dipdir_lst, dips_lst, ldipdir_org,
ldips_org, ldipdir_lst, ldips_lst, sense, dipdir_az)
def redraw_plot(self):
"""
Redraws the plot using the current settings of the dialog's spinbuttons.
This method clears the two axes and adds the annotations. The current
values of the rotation axis and rotation angle spinbuttons are
retrieved. The data is parsed, and the features are then drawn.
In addition the rotation-axis is drawn.
"""
self.original_ax.cla()
self.rotated_ax.cla()
self.original_ax.grid(False)
self.rotated_ax.grid(False)
self.original_ax.set_azimuth_ticks([0], labels=["N"])
self.rotated_ax.set_azimuth_ticks([0], labels=["N"])
bar = 0.05
self.original_ax.annotate("", xy = (-bar, 0),
xytext = (bar, 0),
xycoords = "data",
arrowprops = dict(arrowstyle = "-",
connectionstyle = "arc3"))
self.original_ax.annotate("", xy = (0, -bar),
xytext = (0, bar),
xycoords = "data",
arrowprops = dict(arrowstyle = "-",
connectionstyle = "arc3"))
self.rotated_ax.annotate("", xy = (-bar, 0),
xytext = (bar, 0),
xycoords = "data",
arrowprops = dict(arrowstyle = "-",
connectionstyle = "arc3"))
self.rotated_ax.annotate("", xy = (0, -bar),
xytext = (0, bar),
xycoords = "data",
arrowprops = dict(arrowstyle = "-",
connectionstyle = "arc3"))
raxis_dipdir = self.spinbutton_rotation_dipdir.get_value()
raxis_dip = self.spinbutton_rotation_dip.get_value()
raxis = [raxis_dipdir, raxis_dip]
raxis_angle = self.spinbutton_rotation_angle.get_value()
for lyr_obj in self.data:
lyr_type = lyr_obj.get_layer_type()
lyr_store = lyr_obj.get_data_treestore()
if lyr_type == "plane":
dipdir_org, dips_org, dipdir_lst, dips_lst, strat, dipdir_az = \
self.parse_plane(lyr_store, raxis, raxis_angle)
self.original_ax.plane(dipdir_org, dips_org, color=lyr_obj.get_line_color(),
linewidth=lyr_obj.get_line_width(),
linestyle=lyr_obj.get_line_style(),
dash_capstyle=lyr_obj.get_capstyle(),
alpha=lyr_obj.get_line_alpha(), clip_on=False)
self.rotated_ax.plane(dipdir_lst, dips_lst, color=lyr_obj.get_line_color(),
linewidth=lyr_obj.get_line_width(),
linestyle=lyr_obj.get_line_style(),
dash_capstyle=lyr_obj.get_capstyle(),
alpha=lyr_obj.get_line_alpha(), clip_on=False)
elif lyr_type == "line":
ldipdir_org, ldips_org, ldipdir_lst, ldips_lst, sense = \
self.parse_line(lyr_store, raxis, raxis_angle)
self.original_ax.line(ldips_org, ldipdir_org,
marker=lyr_obj.get_marker_style(),
markersize=lyr_obj.get_marker_size(),
color=lyr_obj.get_marker_fill(),
markeredgewidth=lyr_obj.get_marker_edge_width(),
markeredgecolor=lyr_obj.get_marker_edge_color(),
alpha=lyr_obj.get_marker_alpha(), clip_on=False)
self.rotated_ax.line(ldips_lst, ldipdir_lst,
marker=lyr_obj.get_marker_style(),
markersize=lyr_obj.get_marker_size(),
color=lyr_obj.get_marker_fill(),
markeredgewidth=lyr_obj.get_marker_edge_width(),
markeredgecolor=lyr_obj.get_marker_edge_color(),
alpha=lyr_obj.get_marker_alpha(), clip_on=False)
elif lyr_type == "smallcircle":
ldipdir_org, ldips_org, ldipdir_lst, ldips_lst, angle = \
self.parse_line(lyr_store, raxis, raxis_angle)
self.original_ax.cone(ldips_org, ldipdir_org, angle, facecolor="None",
color=lyr_obj.get_line_color(),
linewidth=lyr_obj.get_line_width(),
label=lyr_obj.get_label(),
linestyle=lyr_obj.get_line_style())
self.rotated_ax.cone(ldips_lst, ldipdir_lst, angle, facecolor="None",
color=lyr_obj.get_line_color(),
linewidth=lyr_obj.get_line_width(),
label=lyr_obj.get_label(),
linestyle=lyr_obj.get_line_style())
elif lyr_type == "faultplane":
rtrn = self.parse_faultplane(lyr_store, raxis, raxis_angle)
dipdir_org, dips_org, dipdir_lst, dips_lst, ldipdir_org, \
ldips_org, ldipdir_lst, ldips_lst, sense = rtrn[0], rtrn[1], \
rtrn[2], rtrn[3], rtrn[4], rtrn[5], rtrn[6], rtrn[7], rtrn[8]
self.original_ax.plane(dipdir_org, dips_org, color=lyr_obj.get_line_color(),
linewidth=lyr_obj.get_line_width(),
linestyle=lyr_obj.get_line_style(),
dash_capstyle=lyr_obj.get_capstyle(),
alpha=lyr_obj.get_line_alpha(), clip_on=False)
self.rotated_ax.plane(dipdir_lst, dips_lst, color=lyr_obj.get_line_color(),
linewidth=lyr_obj.get_line_width(),
linestyle=lyr_obj.get_line_style(),
dash_capstyle=lyr_obj.get_capstyle(),
alpha=lyr_obj.get_line_alpha(), clip_on=False)
self.original_ax.line(ldips_org, ldipdir_org,
marker=lyr_obj.get_marker_style(),
markersize=lyr_obj.get_marker_size(),
color=lyr_obj.get_marker_fill(),
markeredgewidth=lyr_obj.get_marker_edge_width(),
markeredgecolor=lyr_obj.get_marker_edge_color(),
alpha=lyr_obj.get_marker_alpha(), clip_on=False)
self.rotated_ax.line(ldips_lst, ldipdir_lst,
marker=lyr_obj.get_marker_style(),
markersize=lyr_obj.get_marker_size(),
color=lyr_obj.get_marker_fill(),
markeredgewidth=lyr_obj.get_marker_edge_width(),
markeredgecolor=lyr_obj.get_marker_edge_color(),
alpha=lyr_obj.get_marker_alpha(), clip_on=False)
#Plot rotation axis
self.original_ax.line(raxis_dip, raxis_dipdir, marker="o",
markersize=10, color="#ff0000",
markeredgewidth=1, markeredgecolor="#000000",
alpha=1, clip_on=False)
self.canvas.draw()
| gpl-2.0 |
timqian/sms-tools | lectures/6-Harmonic-model/plots-code/harmonicModel-analysis-synthesis.py | 24 | 1387 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import harmonicModel as HM
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/vignesh.wav')
w = np.blackman(1201)
N = 2048
t = -90
nH = 100
minf0 = 130
maxf0 = 300
f0et = 7
Ns = 512
H = Ns/4
minSineDur = .1
harmDevSlope = 0.01
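# Harmonic analysis/synthesis: track up to nH harmonics of a fundamental
# between minf0 and maxf0 Hz, then resynthesize the harmonic component by
# sinusoidal overlap-add with hop size H.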
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
y = SM.sineModelSynth(hfreq, hmag, hphase, Ns, H, fs)
numFrames = int(hfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.figure(1, figsize=(9, 7))
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (vignesh.wav)')
plt.subplot(3,1,2)
yhfreq = np.copy(hfreq)
yhfreq[hfreq==0] = np.nan
plt.plot(frmTime, yhfreq, lw=1.2)
plt.axis([0,y.size/float(fs),0,8000])
plt.title('f_h, harmonic frequencies')
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('yh')
plt.tight_layout()
UF.wavwrite(y, fs, 'vignesh-harmonic-synthesis.wav')
plt.savefig('harmonicModel-analysis-synthesis.png')
plt.show()
| agpl-3.0 |
nfejes/smartplot | smartplot.py | 2 | 5681 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Default preferences
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=11)
plt.rc('legend', fontsize=11)
# Default plot colors, as of MATLAB 2014b
defcolors = [
[0.000, 0.447, 0.741],
[0.850, 0.325, 0.098],
[0.929, 0.694, 0.125],
[0.494, 0.184, 0.556],
[0.466, 0.674, 0.188],
[0.301, 0.745, 0.933],
[0.635, 0.078, 0.184],
]
# Plot hold
__hold_sub = None
# Next color
__linecounter = 0
def __nextcolor():
global __linecounter
color = defcolors[__linecounter]
__linecounter = (__linecounter + 1) % len(defcolors)
return color
def texengform(val,n):
"""
texengform(val,n):
Format a number [val] to TeX style with [n] decimals,
    e.g. texengform(1.43254e4,3) -> "$1.433\\times10^{4}$"
"""
v,e = (('%.'+str(n)+'e') % val).split('e')
e = int(e,10)
if e == 0: return '$%s$' % v
else: return '$%s\\times10^{%d}$' % (v,e)
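# Apply a tick-label format to one axis of a subplot: n selects the axis
# (0=x, 1=y, 2=z); form is either a printf-style format string, an
# 'eng:<digits>' spec handled by texengform, or a callable applied to each
# tick value.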
def axform(sub,form,n):
if n == 0: get,set = sub.get_xticks,sub.set_xticklabels
elif n == 1: get,set = sub.get_yticks,sub.set_yticklabels
elif n == 2: get,set = sub.get_zticks,sub.set_zticklabels
else: return
if isinstance(form, str):
if form.find('eng:') == 0:
n = int(form[4:])
set([texengform(i,n) for i in get()])
else:
set([(form % i) for i in get()])
else:
set([form(i) for i in get()])
# 2D/3D line plot
def smartplot(x,props={},insub=None):
# Process multiple dicts
if isinstance(props,(list,tuple)):
pmerge = {}
for p in props:
pmerge.update(p)
props = pmerge
# getprop lambda
getprop = lambda v,d=None: props[v] if v in props else d
# Format x for most special cases
if not hasattr(x,'__len__'): x = []
if len(x) == 1: x = (range(len(x[0])),x[0])
if len(x) > 3: x = (range(len(x)),x)
# Default range
if len(x) > 0:
arange = getprop('range',np.array([np.min(x,1),np.max(x,1)]).T)
else:
arange = []
# Set colors
color = getprop('color',-1)
mcolor = getprop('mcolor')
if isinstance(color,int):
global __linecounter
if color == -1:
color = __nextcolor()
else:
color = defcolors[color % len(defcolors)]
if mcolor is None:
mcolor = color
# Modulate data
if len(x) > 0:
mod = getprop('mod')
if mod:
# TODO
if getprop('linewidth'):
pass
m = mod[1] - mod[0]
d = mod[0]
x = np.array(x)
x[0] = np.mod(x[0]-d,m) + d
# Add margin
# TODO fix
#if getprop('margin'):
# print('a',arange)
# m = getprop('margin')
# axmin = np.min(arange,1).reshape([-1])
# axmax = np.max(arange,1).reshape([-1])
# axlen = abs(axmax-axmin)
# arange = np.array([axmin - axlen*m,axmax + axlen*m])
# print('b',arange)
# Subplot
if insub:
sub = insub
else:
global __hold_sub
# If hold, use prev sub
if getprop('hold') and __hold_sub:
sub = __hold_sub
if getprop('hold') == 'clear':
sub.clear()
# If previous hold, use that
elif __hold_sub:
sub = __hold_sub
__hold_sub = None
# Else create subplot
else:
fig = plt.figure(figsize=getprop('figsize', (7.5,4)),dpi=getprop('figdpi',300))
if len(x) == 3:
sub = fig.add_subplot(111,projection='3d')
plt.draw() # needed to create zticks
else:
sub = fig.add_subplot(111)
# Store hold sub
if getprop('hold') and not __hold_sub:
__hold_sub = sub
# Plot arguments
kwargs = {
'markersize' : getprop('markersize', 1),
'linewidth' : getprop('linewidth', 0.2),
'linestyle' : getprop('linestyle', '-'),
'marker' : getprop('marker', '.'),
}
    if color is not None: kwargs['color'] = color
    if mcolor is not None: kwargs['mfc'] = kwargs['mec'] = mcolor
if getprop('label'): kwargs['label'] = getprop('label')
# Expand range
if len(x) > 0 and getprop('expand'):
# TODO: find a way to count subplots
if not all(np.array(sub.get_xlim()) == (0,1)):
c = [sub.get_xlim(), sub.get_ylim()]
if len(x) == 3: c += [sub.get_zlim()]
r = np.concatenate([c,arange],1)
arange = np.array([np.min(r,1),np.max(r,1)]).T
# Plot
if len(x):
if len(x) == 3:
print(sub)
phandle = sub.plot(xs=x[0],ys=x[1],zs=x[2],**kwargs)
else:
phandle = sub.plot(x[0],x[1],**kwargs)
else:
phandle = None
# Range
if len(arange) > 0:
sub.set_xlim(arange[0])
sub.set_ylim(arange[1])
if len(arange) == 3:
sub.set_zlim(arange[2])
# Tick handling
def tickspace(x,mul):
a = np.ceil(np.min(x) / mul)
b = np.floor(np.max(x) / mul)
return np.arange(a,b+1) * mul
if len(arange) > 0:
if 'xtick' in props: sub.set_xticks(tickspace(arange[0],props['xtick']))
if 'ytick' in props: sub.set_yticks(tickspace(arange[1],props['ytick']))
if len(arange) == 3 and 'ztick' in props:
sub.set_zticks(tickspace(arange[2],props['ztick']))
if getprop('title'): sub.set_title(getprop('title'))
# Set labels
labels = getprop('labels')
if labels:
if labels[0]: sub.set_xlabel(labels[0])
if labels[1]: sub.set_ylabel(labels[1])
if len(labels) == 3 and labels[2]:
sub.set_zlabel(labels[2])
if getprop('axform'):
for n,form in enumerate(getprop('axform')):
if form:
axform(sub,form,n)
if getprop('smallticks'):
ticklabels = (sub.get_xticklabels() + sub.get_yticklabels())
if len(x) == 3: ticklabels += sub.get_zticklabels()
for item in ticklabels:
item.set_fontsize(8)
if getprop('prerender'):
props['prerender'](sub)
plt.tight_layout()
if getprop('pdf'):
plt.savefig(getprop('pdf'), format='pdf', dpi=1000)
if getprop('png'):
plt.savefig(getprop('png'), format='png', dpi=getprop('pngdpi',400))
if getprop('pause'):
plt.ion()
plt.show()
plt.pause(getprop('pause'))
elif (insub is None and not getprop('hold')) or getprop('show',False):
plt.show()
return phandle
| mit |
jdavidrcamacho/Tests_GP | 02 - Programs being tested/06 - spots tests/RV_function.py | 1 | 2458 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 3 11:36:58 2017
@author: camacho
"""
import numpy as np
import matplotlib.pyplot as pl
pl.close("all")
##### RV FUNCTION 1 - circular orbit
def RV_circular(P=365,K=0.1,T=0,gamma=0,time=100,space=20):
#parameters
#P = period in days
#K = semi-amplitude of the signal
#T = velocity at zero phase
#gamma = average velocity of the star
#time = time of the simulation
#space => I want an observation every time/space days
t=np.linspace(0,time,space)
RV=[K*np.sin(2*np.pi*x/P - T) + gamma for x in t]
RV=[x for x in RV] #m/s
return [t,RV]
##### RV FUNCTION 2 - keplerian orbit
def RV_kepler(P=365,e=0,K=0.1,T=0,gamma=0,w=np.pi,time=100,space=20):
#parameters
#P = period in days
#e = eccentricity
#K = RV amplitude
#gamma = constant system RV
#T = zero phase
#w = longitude of the periastron
#time = time of the simulation
#space => I want an observation every time/space days
t=np.linspace(0,time,space)
#mean anomaly
Mean_anom=[2*np.pi*(x1-T)/P for x1 in t]
#eccentric anomaly -> E0=M + e*sin(M) + 0.5*(e**2)*sin(2*M)
E0=[x + e*np.sin(x) + 0.5*(e**2)*np.sin(2*x) for x in Mean_anom]
#mean anomaly -> M0=E0 - e*sin(E0)
M0=[x - e*np.sin(x) for x in E0]
i=0
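    #Newton-Raphson iteration to solve Kepler's equation M = E - e*sin(E)
    #for the eccentric anomaly E (fixed number of iterations)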
while i<100:
#[x + y for x, y in zip(first, second)]
calc_aux=[x2-y for x2,y in zip(Mean_anom,M0)]
E1=[x3 + y/(1-e*np.cos(x3)) for x3,y in zip(E0,calc_aux)]
M1=[x4 - e*np.sin(x4) for x4 in E0]
i+=1
E0=E1
M0=M1
nu=[2*np.arctan(np.sqrt((1+e)/(1-e))*np.tan(x5/2)) for x5 in E0]
RV=[ gamma + K*(e*np.cos(w)+np.cos(w+x6)) for x6 in nu]
RV=[x for x in RV] #m/s
return t,RV
#Examples
#a=RV_circular()
#pl.figure('RV_circular with P=365')
#pl.plot(a[0],a[1],':',)
#pl.title('planet of 365 days orbit')
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#b=RV_circular(P=100)
#pl.figure('RV_circular with P=100')
#pl.title('planet of 100 days orbit')
#pl.plot(b[0],b[1],':',)
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#c=RV_kepler(P=100,e=0,w=np.pi,time=100)
#pl.figure()
#pl.plot(c[0],c[1],':',)
#pl.title('P=100, e=0, w=pi, time=100')
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#d=RV_kepler(P=100,e=0, w=np.pi,time=500)
#pl.figure()
#pl.title('P=100, e=0, w=pi, time=25')
#pl.plot(d[0],d[1],'-',)
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)') | mit |
mingwpy/numpy | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
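A minimal sketch for the HDF5 case (assuming a reasonably recent PyTables and
a file data.h5 that contains an array node /x; both names are placeholders): ::
>>> import tables
>>> f = tables.open_file('data.h5')
>>> x = f.root.x.read()
>>> f.close()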
Examples of formats that cannot be read directly but which are not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
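A minimal sketch (data.csv is a placeholder for a comma-separated file with
numeric columns; names=True assumes a header row): ::
>>> x = np.loadtxt('data.csv', delimiter=',')
>>> x = np.genfromtxt('data.csv', delimiter=',', names=True)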
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
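For example, a raw file of little-endian 32-bit floats (data.bin is a
placeholder name) can be round-tripped like this: ::
>>> a = np.arange(4, dtype='<f4')
>>> a.tofile('data.bin')
>>> b = np.fromfile('data.bin', dtype='<f4')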
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common examples are
the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
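For example: ::
>>> np.random.rand(2, 3) # uniform random values in [0, 1)
>>> np.diag([1, 2, 3])
array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])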
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/covariance/robust_covariance.py | 105 | 29653 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is almost, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
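# A minimal usage sketch (added for illustration; X stands for any
# (n_samples, n_features) data array, not a variable defined in this module):
# from sklearn.covariance import MinCovDet
# mcd = MinCovDet(random_state=0).fit(X)
# robust_location, robust_covariance = mcd.location_, mcd.covariance_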
| unlicense |
dmargala/qusp | examples/fitspec.py | 1 | 8296 | #!/usr/bin/env python
import argparse
import random
import numpy as np
import h5py
import qusp
import bossdata.path
import bossdata.remote
from astropy.io import fits
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
parser.add_argument("-o", "--output", type=str, default=None,
help="hdf5 output filename")
parser.add_argument("--save-model", action="store_true",
help="specify to save raw data of sparse matrix model")
parser.add_argument("--save-chisq", action="store_true",
help="specify to save per obs chisq values")
## targets to fit
parser.add_argument("-i", "--input", type=str, default=None,
help="target list")
parser.add_argument("-n", "--ntargets", type=int, default=0,
help="number of targets to use, 0 for all")
parser.add_argument("--random", action="store_true",
help="use a random selection of input targets")
parser.add_argument("--seed", type=int, default=42,
help="rng seed")
# fit options
parser.add_argument("--sklearn", action="store_true",
help="use sklearn linear regression instead of scipy lstsq")
# scipy specifc options
parser.add_argument("--max-iter", type=int, default=100,
help="max number of iterations to use in lsqr")
parser.add_argument("--atol", type=float, default=1e-4,
help="a stopping tolerance")
parser.add_argument("--btol", type=float, default=1e-8,
help="b stopping tolerance")
# input data columns
parser.add_argument("--z-col", type=int, default=3,
help="redshift column of input targetlist")
parser.add_argument("--sn-col", type=int, default=None,
help="sn column of input targetlist")
parser.add_argument("--norm-col", type=int, default=None,
help="norm param column of input targetlist")
parser.add_argument("--tilt-col", type=int, default=None,
help="tilt param column of input targetlist")
parser.add_argument("--fix-norm", action="store_true",
help="fix norm param")
parser.add_argument("--fix-tilt", action="store_true",
help="fix tilt param")
parser.add_argument("--continuum-file", type=str, default=None,
help="continuum to load")
qusp.Paths.add_args(parser)
qusp.ContinuumModel.add_args(parser)
args = parser.parse_args()
# setup boss data directory path
paths = qusp.Paths(**qusp.Paths.from_args(args))
try:
finder = bossdata.path.Finder()
mirror = bossdata.remote.Manager()
except ValueError as e:
print(e)
return -1
# read target data
fields = [('z', float, args.z_col)]
if args.norm_col is not None:
fields.append(('amp', float, args.norm_col))
if args.tilt_col is not None:
fields.append(('nu', float, args.tilt_col))
if args.sn_col is not None:
fields.append(('sn', float, args.sn_col))
targets = qusp.target.load_target_list(
args.input, fields, verbose=args.verbose)
# use the first n targets or a random sample
ntargets = args.ntargets if args.ntargets > 0 else len(targets)
if args.random:
random.seed(args.seed)
targets = random.sample(targets, ntargets)
else:
targets = targets[:ntargets]
continuum = None
if args.continuum_file:
specfits = h5py.File(args.continuum_file)
wave = specfits['restWaveCenters'].value
flux = specfits['continuum'].value
continuum = qusp.spectrum.SpectralFluxDensity(wave, flux)
# Initialize model
model = qusp.ContinuumModel(continuum=continuum, **qusp.ContinuumModel.from_args(args))
# Add observations to model
model_targets = []
npixels = []
if args.verbose:
print '... adding observations to fit ...\n'
def get_lite_spectra(targets):
for target in targets:
remote_path = finder.get_spec_path(plate=target['plate'], mjd=target['mjd'], fiber=target['fiber'], lite=True)
try:
local_path = mirror.get(remote_path, auto_download=False)
except RuntimeError as e:
print e
continue
spec = fits.open(local_path)
yield target, qusp.spectrum.read_lite_spectrum(spec)
# for target, combined in qusp.target.get_combined_spectra(targets, paths=paths):
for target, combined in get_lite_spectra(targets):
wavelength = combined.wavelength
ivar = combined.ivar.values
flux = combined.flux.values
# fix quasar spectrum normalization
if args.fix_norm:
if not hasattr(target, 'nu'):
# estimate quasar normalization
try:
norm = combined.mean_flux(args.continuum_normmin*(1+target['z']),
args.continuum_normmax*(1+target['z']))
except RuntimeError:
continue
if norm <= 0:
continue
# restframe amplitude
target['amp'] = norm*(1+target['z'])
# fix spectal tilt
if args.fix_tilt:
if not hasattr(target, 'nu'):
target['nu'] = 0
# Add this observation to our model
npixels_added = model.add_observation(
target, flux, wavelength, ivar, unweighted=args.unweighted)
if npixels_added > 0:
model_targets.append(target)
npixels.append(npixels_added)
if args.verbose:
print target, npixels_added
# Add constraints
if args.continuum_normmax > args.continuum_normmin:
model.add_continuum_constraint(
0, args.continuum_normmin, args.continuum_normmax, args.continuum_normweight)
if args.transmission_normmax > args.transmission_normmin:
model.add_transmission_constraint(
0, args.transmission_normmin, args.transmission_normmax, args.transmission_normweight)
if args.tiltweight > 0:
model.add_tilt_constraint(args.tiltweight)
if args.verbose:
print ''
# Construct the model
model_matrix, model_y = model.get_model()
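    # The continuum model is linear in its parameters, so the fit below reduces
    # to a sparse linear least-squares problem on (model_matrix, model_y),
    # solved either with sklearn's LinearRegression or scipy's iterative LSQR.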
# perform fit
if args.sklearn:
from sklearn import linear_model
regr = linear_model.LinearRegression(fit_intercept=False)
if args.verbose:
print ('... performing fit using '
'sklearn.linear_model.LinearRegression ...\n')
regr.fit(model_matrix, model_y)
soln = regr.coef_
else:
import scipy.sparse.linalg
if args.verbose:
print '... performing fit using scipy.sparse.linalg.lsqr ...\n'
lsqr_soln = scipy.sparse.linalg.lsqr(
model_matrix, model_y, show=args.verbose, iter_lim=args.max_iter,
atol=args.atol, btol=args.btol)
soln = lsqr_soln[0]
chisq = model.get_chisq(soln)
if args.verbose:
print 'chisq (nModelParams,nConstraints): %.2g (%d,%d)' % (
chisq, model.model.shape[1], model.model_nconstraints)
print 'reduced chisq: %.2g' % (
chisq/(model.model.shape[1]-model.model_nconstraints))
# Save HDF5 file with results
outfile = model.save(args.output+'.hdf5', soln, args, args.save_model, args.save_chisq)
outfile.create_dataset('npixels', data=npixels)
outfile.create_dataset(
'targets', data=[target['target'] for target in model_targets])
outfile.create_dataset(
'redshifts', data=[target['z'] for target in model_targets])
try:
mediansn = [target['sn'] for target in model_targets]
except KeyError:
mediansn = np.zeros(len(model_targets))
outfile.create_dataset('sn', data=mediansn)
outfile.close()
# Save target list text file
results = model.get_results(soln)
for index, target in enumerate(model_targets):
target['amp'] = results['amplitude'][index]
target['nu'] = results['nu'][index]
qusp.target.save_target_list(
args.output+'.txt', model_targets, ['z', 'amp', 'nu'],
verbose=args.verbose)
if __name__ == '__main__':
main()
| mit |
abimannans/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
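# As the module docstring notes, a purer class-B subset could be selected by
# thresholding these scores, e.g. X[twoclass_output > 0.5] (0.5 is only an
# illustrative cutoff).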
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/IPython/core/tests/test_pylabtools.py | 15 | 7752 | """Tests for pylab tools module.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
from io import UnsupportedOperation, BytesIO
import matplotlib
matplotlib.use('Agg')
from matplotlib.figure import Figure
from nose import SkipTest
import nose.tools as nt
from matplotlib import pyplot as plt
import numpy as np
# Our own imports
from IPython.core.getipython import get_ipython
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.display import _PNG, _JPEG
from .. import pylabtools as pt
from IPython.testing import decorators as dec
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def test_figure_to_svg():
# simple empty-figure test
fig = plt.figure()
nt.assert_equal(pt.print_figure(fig, 'svg'), None)
plt.close('all')
# simple check for at least svg-looking output
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
plt.draw()
svg = pt.print_figure(fig, 'svg')[:100].lower()
nt.assert_in(u'doctype svg', svg)
def _check_pil_jpeg_bytes():
"""Skip if PIL can't write JPEGs to BytesIO objects"""
# PIL's JPEG plugin can't write to BytesIO objects
# Pillow fixes this
from PIL import Image
buf = BytesIO()
img = Image.new("RGB", (4,4))
try:
img.save(buf, 'jpeg')
except Exception as e:
ename = e.__class__.__name__
raise SkipTest("PIL can't write JPEG to BytesIO: %s: %s" % (ename, e))
@dec.skip_without("PIL.Image")
def test_figure_to_jpeg():
_check_pil_jpeg_bytes()
# simple check for at least jpeg-looking output
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
plt.draw()
jpeg = pt.print_figure(fig, 'jpeg', quality=50)[:100].lower()
assert jpeg.startswith(_JPEG)
def test_retina_figure():
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
plt.draw()
png, md = pt.retina_figure(fig)
assert png.startswith(_PNG)
nt.assert_in('width', md)
nt.assert_in('height', md)
_fmt_mime_map = {
'png': 'image/png',
'jpeg': 'image/jpeg',
'pdf': 'application/pdf',
'retina': 'image/png',
'svg': 'image/svg+xml',
}
def test_select_figure_formats_str():
ip = get_ipython()
for fmt, active_mime in _fmt_mime_map.items():
pt.select_figure_formats(ip, fmt)
for mime, f in ip.display_formatter.formatters.items():
if mime == active_mime:
nt.assert_in(Figure, f)
else:
nt.assert_not_in(Figure, f)
def test_select_figure_formats_kwargs():
ip = get_ipython()
kwargs = dict(quality=10, bbox_inches='tight')
pt.select_figure_formats(ip, 'png', **kwargs)
formatter = ip.display_formatter.formatters['image/png']
f = formatter.lookup_by_type(Figure)
cell = f.__closure__[0].cell_contents
nt.assert_equal(cell, kwargs)
# check that the formatter doesn't raise
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
plt.draw()
formatter.enabled = True
png = formatter(fig)
assert png.startswith(_PNG)
def test_select_figure_formats_set():
ip = get_ipython()
for fmts in [
{'png', 'svg'},
['png'],
('jpeg', 'pdf', 'retina'),
{'svg'},
]:
active_mimes = {_fmt_mime_map[fmt] for fmt in fmts}
pt.select_figure_formats(ip, fmts)
for mime, f in ip.display_formatter.formatters.items():
if mime in active_mimes:
nt.assert_in(Figure, f)
else:
nt.assert_not_in(Figure, f)
def test_select_figure_formats_bad():
ip = get_ipython()
with nt.assert_raises(ValueError):
pt.select_figure_formats(ip, 'foo')
with nt.assert_raises(ValueError):
pt.select_figure_formats(ip, {'png', 'foo'})
with nt.assert_raises(ValueError):
pt.select_figure_formats(ip, ['retina', 'pdf', 'bar', 'bad'])
def test_import_pylab():
ns = {}
pt.import_pylab(ns, import_all=False)
nt.assert_true('plt' in ns)
nt.assert_equal(ns['np'], np)
class TestPylabSwitch(object):
class Shell(InteractiveShell):
def enable_gui(self, gui):
pass
def setup(self):
import matplotlib
def act_mpl(backend):
matplotlib.rcParams['backend'] = backend
# Save rcParams since they get modified
self._saved_rcParams = matplotlib.rcParams
self._saved_rcParamsOrig = matplotlib.rcParamsOrig
matplotlib.rcParams = dict(backend='Qt4Agg')
matplotlib.rcParamsOrig = dict(backend='Qt4Agg')
# Mock out functions
self._save_am = pt.activate_matplotlib
pt.activate_matplotlib = act_mpl
self._save_ip = pt.import_pylab
pt.import_pylab = lambda *a,**kw:None
self._save_cis = pt.configure_inline_support
pt.configure_inline_support = lambda *a,**kw:None
def teardown(self):
pt.activate_matplotlib = self._save_am
pt.import_pylab = self._save_ip
pt.configure_inline_support = self._save_cis
import matplotlib
matplotlib.rcParams = self._saved_rcParams
matplotlib.rcParamsOrig = self._saved_rcParamsOrig
def test_qt(self):
s = self.Shell()
gui, backend = s.enable_matplotlib(None)
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib('qt')
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib()
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
def test_inline(self):
s = self.Shell()
gui, backend = s.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
nt.assert_equal(s.pylab_gui_select, None)
gui, backend = s.enable_matplotlib('inline')
nt.assert_equal(gui, 'inline')
nt.assert_equal(s.pylab_gui_select, None)
gui, backend = s.enable_matplotlib('qt')
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
def test_qt_gtk(self):
s = self.Shell()
gui, backend = s.enable_matplotlib('qt')
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
gui, backend = s.enable_matplotlib('gtk')
nt.assert_equal(gui, 'qt')
nt.assert_equal(s.pylab_gui_select, 'qt')
| mit |
wdm0006/categorical_encoding | category_encoders/cat_boost.py | 1 | 12465 | """CatBoost coding"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'Jan Motl'
class CatBoostEncoder(BaseEstimator, TransformerMixin):
"""CatBoost coding for categorical features.
This is very similar to leave-one-out encoding, but calculates the
values "on-the-fly". Consequently, the values naturally vary
during the training phase and it is not necessary to add random noise.
    Beware, the training data have to be randomly permuted, e.g.:
# Random permutation
perm = np.random.permutation(len(X))
X = X.iloc[perm].reset_index(drop=True)
y = y.iloc[perm].reset_index(drop=True)
    This is necessary because some data sets are sorted based on the target
    value and this encoder encodes the features on the fly in a single pass
    (a small runnable sketch of this workflow is appended at the end of this module).
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
handle_unknown: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
sigma: float
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
sigma gives the standard deviation (spread or "width") of the normal distribution.
a: float
additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] Transforming categorical features to numerical features, from
https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/
.. [2] CatBoost: unbiased boosting with categorical features, from
https://arxiv.org/abs/1706.09516
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1):
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.verbose = verbose
self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X
self.cols = cols
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self._mean = None
self.random_state = random_state
self.sigma = sigma
self.feature_names = None
self.a = a
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# unite the input into pandas types
X = util.convert_input(X)
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.use_default_cols:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
categories = self._fit(
X, y,
cols=self.cols
)
self.mapping = categories
X_temp = self.transform(X, y, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
            except ValueError as e:  # list.remove raises ValueError, not KeyError
                if self.verbose > 0:
                    print("Could not remove column from feature names. "
                          "Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples] when transform by leave one out
None, when transform without target information (such as transform test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# unite the input into pandas types
X = util.convert_input(X)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
# if we are encoding the training data, we have to check the target
if y is not None:
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
if not list(self.cols):
return X
X = self._transform(
X, y,
mapping=self.mapping
)
if self.drop_invariant:
for col in self.drop_cols:
                X.drop(col, axis=1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
def fit_transform(self, X, y=None, **fit_params):
"""
Encoders that utilize the target must make sure that the training data are transformed with:
transform(X, y)
and not with:
transform(X)
"""
# the interface requires 'y=None' in the signature but we need 'y'
if y is None:
            raise TypeError("fit_transform() missing argument: 'y'")
return self.fit(X, y, **fit_params).transform(X, y)
def _fit(self, X_in, y, cols=None):
X = X_in.copy(deep=True)
if cols is None:
cols = X.columns.values
self._mean = y.mean()
return {col: self._fit_column_map(X[col], y) for col in cols}
    def _fit_column_map(self, series, y):
        # Encode the column as integer codes; missing values (code -1) are mapped
        # to an extra NaN category appended after the observed levels.
        category = pd.Categorical(series)
        categories = category.categories
        codes = category.codes.copy()
        codes[codes == -1] = len(categories)
        categories = np.append(categories, np.nan)
        return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)]))
        # Per-level sum and count of the target, re-indexed by the original level values.
        result = y.groupby(codes).agg(['sum', 'count'])
        return result.rename(return_map)
def _transform(self, X_in, y, mapping=None):
"""
The model uses a single column of floats to represent the means of the target variables.
"""
X = X_in.copy(deep=True)
random_state_ = check_random_state(self.random_state)
# Prepare the data
if y is not None:
# Convert bools to numbers (the target must be summable)
y = y.astype('double')
for col, colmap in mapping.items():
level_notunique = colmap['count'] > 1
unique_train = colmap.index
unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train])
is_nan = X_in[col].isnull()
is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object))
if self.handle_unknown == 'error' and is_unknown_value.any():
raise ValueError('Columns to be encoded can not contain new values')
if y is None: # Replace level with its mean target; if level occurs only once, use global mean
level_means = ((colmap['sum'] + self._mean) / (colmap['count'] + self.a)).where(level_notunique, self._mean)
X[col] = X[col].map(level_means)
else:
# Simulation of CatBoost implementation, which calculates leave-one-out on the fly.
# The nice thing about this is that it helps to prevent overfitting. The bad thing
# is that CatBoost uses many iterations over the data. But we run just one iteration.
# Still, it works better than leave-one-out without any noise.
# See:
# https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/
# Cumsum does not work nicely with None (while cumcount does).
# As a workaround, we cast the grouping column as string.
# See: issue #209
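                # Worked illustration (added; the numbers are hypothetical): with prior
                # mean self._mean = 0.5 and a = 1, take the 3rd occurrence of a level
                # whose earlier targets were [1, 0] and whose current target is y = 1.
                # Then cumsum = 2 (includes the current row) and cumcount = 2 (excludes
                # it), so the row is encoded as (2 - 1 + 0.5) / (2 + 1) = 0.5, i.e. only
                # the prior targets and the prior mean contribute.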
temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount'])
X[col] = (temp['cumsum'] - y + self._mean) / (temp['cumcount'] + self.a)
if self.handle_unknown == 'value':
if X[col].dtype.name == 'category':
X[col] = X[col].astype(float)
X.loc[is_unknown_value, col] = self._mean
elif self.handle_unknown == 'return_nan':
X.loc[is_unknown_value, col] = np.nan
if self.handle_missing == 'value':
X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean
elif self.handle_missing == 'return_nan':
X.loc[is_nan, col] = np.nan
if self.sigma is not None and y is not None:
X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0])
return X
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns
-------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
"""
if not isinstance(self.feature_names, list):
raise ValueError('Must fit data first. Affected feature names are not known before.')
else:
return self.feature_names
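if __name__ == '__main__':
    # Minimal smoke-test sketch (added; not part of the original module). It
    # illustrates the permutation advice from the class docstring on a tiny,
    # hand-made dataset; the column name 'color' and all values are invented.
    X_demo = pd.DataFrame({'color': ['red', 'blue', 'red', 'green', 'blue', 'red']})
    y_demo = pd.Series([1, 0, 1, 0, 1, 0])
    perm = np.random.permutation(len(X_demo))  # shuffle first: the encoder works in a single pass
    X_demo = X_demo.iloc[perm].reset_index(drop=True)
    y_demo = y_demo.iloc[perm].reset_index(drop=True)
    enc = CatBoostEncoder(cols=['color'], a=1)
    print(enc.fit_transform(X_demo, y_demo))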
| bsd-3-clause |