repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
cuiwei0322/cost_analysis | tall_building_zero_attack_angle_cost_analysis/Result/peak_ng.py | 1 | 2728 | import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from matplotlib import cm
from matplotlib import pyplot as plt
from itertools import product, combinations
from matplotlib import rc
from matplotlib.font_manager import FontProperties
font_size = 8
rc('font',**{'family':'serif','serif':['Times New Roman'],'size':font_size})
step = 0.04
maxval = 1.0
fig = plt.figure(num=1, figsize=(2.9,2.4), dpi=300, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111, projection='3d')
# create supporting points in polar coordinates
r = np.linspace(0,0.8,40)
p = np.linspace(0,2*np.pi,60)
R,P = np.meshgrid(r,p)
# transform them to cartesian system
X,Y = R*np.cos(P),R*np.sin(P)
mu_x = 0.3656
sigma_x = 0.1596
sigma_y = 0.1964
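# Note (descriptive comment, added for clarity): Z below is the joint density of two
# independent normals, N(mu_x, sigma_x^2) for X and N(0, sigma_y^2) for Y, evaluated
# on the Cartesian grid built above.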
Z = np.exp(-(X - mu_x)**2/(2*sigma_x**2) - (Y)**2/(2*sigma_y**2)) / (2*np.pi*sigma_x*sigma_y)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, alpha = 0.7)
cset = ax.contourf(X, Y, Z, zdir='z', offset=-1, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='x', offset=0.7, cmap=cm.coolwarm, alpha = 0.5)
# cset = ax.contourf(X, Y, Z, zdir='y', offset=0.8, cmap=cm.coolwarm, alpha = 0.5)
theta = np.linspace(0, 2 * np.pi, 100)
r = 0.3
x = r * np.sin(theta)
y = r * np.cos(theta)
z = np.exp(-(x - mu_x)**2/(2*sigma_x**2) - (y)**2/(2*sigma_y**2)) / (2*np.pi*sigma_x*sigma_y)
Z = -1
ax.plot(x, y, z, '-b',zorder = 10,linewidth = 0.5,label = 'Joint PDF on integral path')
ax.plot(x, y, Z, '-.b',zorder = 9,linewidth = 0.5,label = 'Integral path')
#draw an arrow
r = 0.3
Z = -1
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
a = Arrow3D([0,r*np.cos(5/4*np.pi)],[0,r*np.sin(5/4*np.pi)],[Z,Z], mutation_scale=4, lw=0.5, arrowstyle="-|>", color="k")
ax.add_artist(a)
rt = 0.3
ax.text(rt*np.cos(4/4*np.pi)+0.1, rt*np.sin(4/4*np.pi)+0.05, Z, 'r', None)
#done with arrow
# legend
# fontP = FontProperties()
# fontP.set_size('small')
legend = ax.legend(loc='upper center',shadow=False,handlelength = 3.8,prop={'size':font_size})
#
ax.view_init(elev=30, azim=-110)
ax.set_zlim3d(-1, 5)
ax.set_xlabel(r'$r_x$', fontsize = font_size)
ax.set_ylabel(r'$r_y$',fontsize = font_size)
ax.set_zlabel(r'Joint PDF,$f(r_x,r_y)$')
plt.tight_layout()
# plt.show()
plt.savefig("peak_ng.pdf") | apache-2.0 |
vybstat/scikit-learn | sklearn/utils/validation.py | 30 | 24618 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
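# Example (illustrative sketch, assuming this module is importable as
# sklearn.utils.validation): integer input is upcast to a float array.
# >>> import numpy as np
# >>> from sklearn.utils.validation import as_float_array
# >>> as_float_array(np.array([1, 2, 3])).dtype
# dtype('float64')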
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
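# Example (illustrative sketch): a nested list is converted to a validated
# 2d numpy array with at least one sample and one feature.
# >>> X = check_array([[1, 2], [3, 4]])
# >>> X.shape
# (2, 2)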
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
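# Example (illustrative sketch): X is validated as a 2d array and y as a
# 1d vector of consistent length.
# >>> X, y = check_X_y([[1.], [2.], [3.]], [0, 1, 0])
# >>> X.shape, y.shape
# ((3, 1), (3,))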
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
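# Example (illustrative sketch): integers become seeded RandomState objects,
# and existing RandomState instances are passed through unchanged.
# >>> rng = check_random_state(42)
# >>> check_random_state(rng) is rng
# True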
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
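# Example (illustrative sketch): a non-symmetric matrix is symmetrized by
# averaging with its transpose (a warning is emitted unless silenced).
# >>> A = np.array([[0., 2.], [0., 0.]])
# >>> B = check_symmetric(A, raise_warning=False)
# >>> np.allclose(B, B.T)
# True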
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
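# Example (illustrative sketch, assuming an unfitted estimator such as
# sklearn.svm.SVC): a missing fitted attribute raises NotFittedError.
# >>> from sklearn.svm import SVC
# >>> check_is_fitted(SVC(), "support_")
# Traceback (most recent call last):
#     ...
# NotFittedError: This SVC instance is not fitted yet. Call 'fit' with appropriate arguments before using this method.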
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
michigraber/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/groupby/aggregate/test_cython.py | 2 | 6848 | """
test cython .agg behavior
"""
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, NaT, Series, Timedelta, Timestamp, bdate_range
from pandas.core.groupby.groupby import DataError
import pandas.util.testing as tm
@pytest.mark.parametrize(
"op_name",
[
"count",
"sum",
"std",
"var",
"sem",
"mean",
pytest.param(
"median",
# ignore mean of empty slice
# and all-NaN
marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")],
),
"prod",
"min",
"max",
],
)
def test_cythonized_aggers(op_name):
data = {
"A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan],
"B": ["A", "B"] * 6,
"C": np.random.randn(12),
}
df = DataFrame(data)
df.loc[2:10:2, "C"] = np.nan
op = lambda x: getattr(x, op_name)()
# single column
grouped = df.drop(["B"], axis=1).groupby("A")
exp = {cat: op(group["C"]) for cat, group in grouped}
exp = DataFrame({"C": exp})
exp.index.name = "A"
result = op(grouped)
tm.assert_frame_equal(result, exp)
# multiple columns
grouped = df.groupby(["A", "B"])
expd = {}
for (cat1, cat2), group in grouped:
expd.setdefault(cat1, {})[cat2] = op(group["C"])
exp = DataFrame(expd).T.stack(dropna=False)
exp.index.names = ["A", "B"]
exp.name = "C"
result = op(grouped)["C"]
if op_name in ["sum", "prod"]:
tm.assert_series_equal(result, exp)
def test_cython_agg_boolean():
frame = DataFrame(
{
"a": np.random.randint(0, 5, 50),
"b": np.random.randint(0, 2, 50).astype("bool"),
}
)
result = frame.groupby("a")["b"].mean()
expected = frame.groupby("a")["b"].agg(np.mean)
tm.assert_series_equal(result, expected)
def test_cython_agg_nothing_to_agg():
frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
frame.groupby("a")["b"].mean()
frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
with pytest.raises(DataError, match=msg):
frame[["b"]].groupby(frame["a"]).mean()
def test_cython_agg_nothing_to_agg_with_dates():
frame = DataFrame(
{
"a": np.random.randint(0, 5, 50),
"b": ["foo", "bar"] * 25,
"dates": pd.date_range("now", periods=50, freq="T"),
}
)
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
frame.groupby("b").dates.mean()
def test_cython_agg_frame_columns():
# #2113
df = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]})
df.groupby(level=0, axis="columns").mean()
df.groupby(level=0, axis="columns").mean()
df.groupby(level=0, axis="columns").mean()
df.groupby(level=0, axis="columns").mean()
def test_cython_agg_return_dict():
# GH 16741
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.randn(8),
"D": np.random.randn(8),
}
)
ts = df.groupby("A")["B"].agg(lambda x: x.value_counts().to_dict())
expected = Series(
[{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}],
index=Index(["bar", "foo"], name="A"),
name="B",
)
tm.assert_series_equal(ts, expected)
def test_cython_fail_agg():
dr = bdate_range("1/1/2000", periods=50)
ts = Series(["A", "B", "C", "D", "E"] * 10, index=dr)
grouped = ts.groupby(lambda x: x.month)
summed = grouped.sum()
expected = grouped.agg(np.sum)
tm.assert_series_equal(summed, expected)
@pytest.mark.parametrize(
"op, targop",
[
("mean", np.mean),
("median", np.median),
("var", np.var),
("add", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
],
)
def test__cython_agg_general(op, targop):
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
result = df.groupby(labels)._cython_agg_general(op)
expected = df.groupby(labels).agg(targop)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op, targop",
[
("mean", np.mean),
("median", lambda x: np.median(x) if len(x) > 0 else np.nan),
("var", lambda x: np.var(x, ddof=1)),
("min", np.min),
("max", np.max),
],
)
def test_cython_agg_empty_buckets(op, targop, observed):
df = pd.DataFrame([11, 12, 13])
grps = range(0, 55, 5)
# calling _cython_agg_general directly, instead of via the user API
# which sets different values for min_count, so do that here.
g = df.groupby(pd.cut(df[0], grps), observed=observed)
result = g._cython_agg_general(op)
g = df.groupby(pd.cut(df[0], grps), observed=observed)
expected = g.agg(lambda x: targop(x))
tm.assert_frame_equal(result, expected)
def test_cython_agg_empty_buckets_nanops(observed):
# GH-18869 can't call nanops on empty groups, so hardcode expected
# for these
df = pd.DataFrame([11, 12, 13], columns=["a"])
grps = range(0, 25, 5)
# add / sum
result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
"add"
)
intervals = pd.interval_range(0, 20, freq=5)
expected = pd.DataFrame(
{"a": [0, 0, 36, 0]},
index=pd.CategoricalIndex(intervals, name="a", ordered=True),
)
if observed:
expected = expected[expected.a != 0]
tm.assert_frame_equal(result, expected)
# prod
result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
"prod"
)
expected = pd.DataFrame(
{"a": [1, 1, 1716, 1]},
index=pd.CategoricalIndex(intervals, name="a", ordered=True),
)
if observed:
expected = expected[expected.a != 1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["first", "last", "max", "min"])
@pytest.mark.parametrize(
"data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")]
)
def test_cython_with_timestamp_and_nat(op, data):
# https://github.com/pandas-dev/pandas/issues/19526
df = DataFrame({"a": [0, 1], "b": [data, NaT]})
index = Index([0, 1], name="a")
# We will group by a and test the cython aggregations
expected = DataFrame({"b": [data, NaT]}, index=index)
result = df.groupby("a").aggregate(op)
tm.assert_frame_equal(expected, result)
| apache-2.0 |
indigowhale33/Digit-Recognizer | mnist992.py | 1 | 1289 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 8 12:34:27 2016
@author: Steve Cho
"""
import pandas as pd
df_wine= pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header = None)
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
X, y = df_wine.iloc[:,1:].values, df_wine.iloc[:,0].values
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3, random_state=0)
sc= StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
from sklearn.decomposition import PCA
pca = PCA(n_components = len(X.T))
X_train_pca_sklearn = pca.fit_transform(X_train_std)
variance_retained=0.9
cum_VE=0
i=0
while cum_VE < variance_retained:
i=i+1
cum_VE = sum(pca.explained_variance_ratio_[0:i])
npcs=i
print ("Use", npcs, "principal components to retain ", variance_retained*100, "% of the variance")
pca = PCA(n_components = npcs)
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 5, p=2, metric='minkowski')
knn.fit(X_train_pca, y_train)
y_pred = knn.predict(X_test_pca) | gpl-3.0 |
pford68/nupic.research | union_pooling/union_pooling/activation/excite_functions/excite_functions_all.py | 2 | 3781 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import matplotlib.pyplot as plt
from excite_function_base import ExciteFunctionBase
class LogisticExciteFunction(ExciteFunctionBase):
"""
Implementation of a logistic activation function for activation updating.
Specifically, the function has the following form:
f(x) = (maxValue - minValue) / (1 + exp(-steepness * (x - xMidpoint) ) ) + minValue
Note: The excitation rate is linear. The activation function is
logistic.
"""
def __init__(self, xMidpoint=5, minValue=10, maxValue=20, steepness=1):
"""
@param xMidpoint: Controls where function output is half of 'maxValue,'
i.e. f(xMidpoint) = maxValue / 2
@param minValue: Minimum value of the function
@param maxValue: Controls the maximum value of the function's range
@param steepness: Controls the steepness of the "middle" part of the
curve where output values begin changing rapidly.
Must be a non-zero value.
"""
assert steepness != 0
self._xMidpoint = xMidpoint
self._maxValue = maxValue
self._minValue = minValue
self._steepness = steepness
def excite(self, currentActivation, inputs):
"""
Increases current activation by amount.
@param currentActivation (numpy array) Current activation levels for each cell
@param inputs (numpy array) inputs for each cell
"""
currentActivation = self._minValue + (self._maxValue - self._minValue) / \
(1 + numpy.exp(-self._steepness * (inputs - self._xMidpoint)))
return currentActivation
def plot(self):
"""
plot the activation function
"""
plt.ion()
plt.show()
x = numpy.linspace(0, 15, 100)
y = numpy.zeros(x.shape)
y = self.excite(y, x)
plt.plot(x, y)
plt.xlabel('Input')
plt.ylabel('Persistence')
plt.title('Sigmoid Activation Function')
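# Worked value (illustrative, using the defaults above): at the midpoint
# x = xMidpoint = 5 the logistic term equals 1/2, so the activation is
# minValue + (maxValue - minValue) / 2 = 10 + (20 - 10) / 2 = 15.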
class FixedExciteFunction(ExciteFunctionBase):
"""
Implementation of a simple fixed excite function
The function resets the activation level to a fixed amount
"""
def __init__(self, targetExcLevel=10.0):
"""
"""
self._targetExcLevel = targetExcLevel
def excite(self, currentActivation, inputs):
"""
Increases current activation by a fixed amount.
@param currentActivation (numpy array) Current activation levels for each cell
@param inputs (numpy array) inputs for each cell
"""
currentActivation = self._targetExcLevel
return currentActivation
def plot(self):
"""
plot the activation function
"""
plt.ion()
plt.show()
x = numpy.linspace(0, 15, 100)
y = numpy.zeros(x.shape)
y = self.excite(y, x)
plt.plot(x, y)
plt.xlabel('Input')
plt.ylabel('Persistence')
| gpl-3.0 |
OshynSong/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
mblue9/tools-iuc | tools/vsnp/vsnp_add_zero_coverage.py | 2 | 8655 | #!/usr/bin/env python
import argparse
import multiprocessing
import os
import queue
import re
import shutil
import pandas
import pysam
from Bio import SeqIO
INPUT_BAM_DIR = 'input_bam_dir'
INPUT_VCF_DIR = 'input_vcf_dir'
OUTPUT_VCF_DIR = 'output_vcf_dir'
OUTPUT_METRICS_DIR = 'output_metrics_dir'
def get_base_file_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
elif base_file_name.endswith("_vcf"):
# The "." character has likely
# changed to an "_" character.
# str.rstrip() strips a character set, not a suffix, so slice the suffix off instead
# (e.g. "sample1_vcf" -> "sample1").
return base_file_name[:-len("_vcf")]
return base_file_name
def get_coverage_and_snp_count(task_queue, reference, output_metrics, output_vcf, timeout):
while True:
try:
tup = task_queue.get(block=True, timeout=timeout)
except queue.Empty:
break
bam_file, vcf_file = tup
# Create a coverage dictionary.
coverage_dict = {}
coverage_list = pysam.depth(bam_file, split_lines=True)
for line in coverage_list:
chrom, position, depth = line.split('\t')
coverage_dict["%s-%s" % (chrom, position)] = depth
# Convert it to a data frame.
coverage_df = pandas.DataFrame.from_dict(coverage_dict, orient='index', columns=["depth"])
# Create a zero coverage dictionary.
zero_dict = {}
for record in SeqIO.parse(reference, "fasta"):
chrom = record.id
total_len = len(record.seq)
for pos in list(range(1, total_len + 1)):
zero_dict["%s-%s" % (str(chrom), str(pos))] = 0
# Convert it to a data frame with depth_x
# and depth_y columns - index is NaN.
zero_df = pandas.DataFrame.from_dict(zero_dict, orient='index', columns=["depth"])
coverage_df = zero_df.merge(coverage_df, left_index=True, right_index=True, how='outer')
# depth_x "0" column no longer needed.
coverage_df = coverage_df.drop(columns=['depth_x'])
coverage_df = coverage_df.rename(columns={'depth_y': 'depth'})
# Covert the NaN to 0 coverage and get some metrics.
coverage_df = coverage_df.fillna(0)
coverage_df['depth'] = coverage_df['depth'].apply(int)
total_length = len(coverage_df)
average_coverage = coverage_df['depth'].mean()
zero_df = coverage_df[coverage_df['depth'] == 0]
total_zero_coverage = len(zero_df)
total_coverage = total_length - total_zero_coverage
genome_coverage = "{:.2%}".format(total_coverage / total_length)
# Process the associated VCF input.
column_names = ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "Sample"]
vcf_df = pandas.read_csv(vcf_file, sep='\t', header=None, names=column_names, comment='#')
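# A "good SNP" below is a simple substitution: single-base REF and ALT alleles
# with a variant quality above 150.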
good_snp_count = len(vcf_df[(vcf_df['ALT'].str.len() == 1) & (vcf_df['REF'].str.len() == 1) & (vcf_df['QUAL'] > 150)])
base_file_name = get_base_file_name(vcf_file)
if total_zero_coverage > 0:
header_file = "%s_header.csv" % base_file_name
with open(header_file, 'w') as outfile:
with open(vcf_file) as infile:
for line in infile:
if re.search('^#', line):
outfile.write("%s" % line)
vcf_df_snp = vcf_df[vcf_df['REF'].str.len() == 1]
vcf_df_snp = vcf_df_snp[vcf_df_snp['ALT'].str.len() == 1]
vcf_df_snp['ABS_VALUE'] = vcf_df_snp['CHROM'].map(str) + "-" + vcf_df_snp['POS'].map(str)
vcf_df_snp = vcf_df_snp.set_index('ABS_VALUE')
cat_df = pandas.concat([vcf_df_snp, zero_df], axis=1, sort=False)
cat_df = cat_df.drop(columns=['CHROM', 'POS', 'depth'])
cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']] = cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']].fillna('.')
cat_df['REF'] = cat_df['REF'].fillna('N')
cat_df['FORMAT'] = cat_df['FORMAT'].fillna('GT')
cat_df['Sample'] = cat_df['Sample'].fillna('./.')
cat_df['temp'] = cat_df.index.str.rsplit('-', n=1)
cat_df[['CHROM', 'POS']] = pandas.DataFrame(cat_df.temp.values.tolist(), index=cat_df.index)
cat_df = cat_df[['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'Sample']]
cat_df['POS'] = cat_df['POS'].astype(int)
cat_df = cat_df.sort_values(['CHROM', 'POS'])
body_file = "%s_body.csv" % base_file_name
cat_df.to_csv(body_file, sep='\t', header=False, index=False)
if output_vcf is None:
output_vcf_file = os.path.join(OUTPUT_VCF_DIR, "%s.vcf" % base_file_name)
else:
output_vcf_file = output_vcf
with open(output_vcf_file, "w") as outfile:
for cf in [header_file, body_file]:
with open(cf, "r") as infile:
for line in infile:
outfile.write("%s" % line)
else:
if output_vcf is None:
output_vcf_file = os.path.join(OUTPUT_VCF_DIR, "%s.vcf" % base_file_name)
else:
output_vcf_file = output_vcf
shutil.copyfile(vcf_file, output_vcf_file)
bam_metrics = [base_file_name, "", "%4f" % average_coverage, genome_coverage]
vcf_metrics = [base_file_name, str(good_snp_count), "", ""]
if output_metrics is None:
output_metrics_file = os.path.join(OUTPUT_METRICS_DIR, "%s.tabular" % base_file_name)
else:
output_metrics_file = output_metrics
metrics_columns = ["File", "Number of Good SNPs", "Average Coverage", "Genome Coverage"]
with open(output_metrics_file, "w") as fh:
fh.write("# %s\n" % "\t".join(metrics_columns))
fh.write("%s\n" % "\t".join(bam_metrics))
fh.write("%s\n" % "\t".join(vcf_metrics))
task_queue.task_done()
def set_num_cpus(num_files, processes):
num_cpus = int(multiprocessing.cpu_count())
if num_files < num_cpus and num_files < processes:
return num_files
if num_cpus < processes:
half_cpus = int(num_cpus / 2)
if num_files < half_cpus:
return num_files
return half_cpus
return processes
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--output_metrics', action='store', dest='output_metrics', required=False, default=None, help='Output metrics text file')
parser.add_argument('--output_vcf', action='store', dest='output_vcf', required=False, default=None, help='Output VCF file')
parser.add_argument('--reference', action='store', dest='reference', help='Reference dataset')
parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')
args = parser.parse_args()
# The assumption here is that the list of files
# in both INPUT_BAM_DIR and INPUT_VCF_DIR are
# equal in number and named such that they are
# properly matched if the directories contain
# more than 1 file (i.e., hopefully the bam file
# names and vcf file names will be something like
# Mbovis-01D6_* so they can be # sorted and properly
# associated with each other).
bam_files = []
for file_name in sorted(os.listdir(INPUT_BAM_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_BAM_DIR, file_name))
bam_files.append(file_path)
vcf_files = []
for file_name in sorted(os.listdir(INPUT_VCF_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_VCF_DIR, file_name))
vcf_files.append(file_path)
multiprocessing.set_start_method('spawn')
queue1 = multiprocessing.JoinableQueue()
num_files = len(bam_files)
cpus = set_num_cpus(num_files, args.processes)
# Set a timeout for get()s in the queue.
timeout = 0.05
# Add each associated bam and vcf file pair to the queue.
for i, bam_file in enumerate(bam_files):
vcf_file = vcf_files[i]
queue1.put((bam_file, vcf_file))
# Complete the get_coverage_and_snp_count task.
processes = [multiprocessing.Process(target=get_coverage_and_snp_count, args=(queue1, args.reference, args.output_metrics, args.output_vcf, timeout, )) for _ in range(cpus)]
for p in processes:
p.start()
for p in processes:
p.join()
queue1.join()
if queue1.empty():
queue1.close()
queue1.join_thread()
| mit |
almarklein/scikit-image | viewer_examples/plugins/watershed_demo.py | 4 | 1276 | import matplotlib.pyplot as plt
from skimage import data
from skimage import filter
from skimage import morphology
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import history
from skimage.viewer.plugins.labelplugin import LabelPainter
class OKCancelButtons(history.OKCancelButtons):
def update_original_image(self):
# OKCancelButtons updates the original image with the filtered image
# by default. Override this method to update the overlay.
self.plugin._show_watershed()
self.plugin.close()
class WatershedPlugin(LabelPainter):
def help(self):
helpstr = ("Watershed plugin",
"----------------",
"Use mouse to paint each region with a different label.",
"Press OK to display segmented image.")
return '\n'.join(helpstr)
def _show_watershed(self):
viewer = self.image_viewer
edge_image = filter.sobel(viewer.image)
labels = morphology.watershed(edge_image, self.paint_tool.overlay)
viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5)
viewer.redraw()
image = data.coins()
plugin = WatershedPlugin()
plugin += OKCancelButtons()
viewer = ImageViewer(image)
viewer += plugin
viewer.show()
| bsd-3-clause |
sandipde/bokehplot | plot-server/main.py | 1 | 4359 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from copy import copy
from bokeh.core.properties import field
from bokeh.io import curdoc
from bokeh.layouts import layout,column,row
from bokeh.models.layouts import HBox
from bokeh.models import (
ColumnDataSource, HoverTool, SingleIntervalTicker, Slider, Button, Label,
CategoricalColorMapper,
)
from bokeh.models.widgets import Panel, Tabs
from bokeh.models import ColumnDataSource, CustomJS, Rect,Spacer
from bokeh.models import HoverTool,TapTool,FixedTicker,Circle
from bokeh.models import BoxSelectTool, LassoSelectTool
from bokeh.models.mappers import LinearColorMapper
from bokeh.plotting import figure
from bokeh.layouts import row, widgetbox
from bokeh.models import Select
from cosmo import create_plot
#from data import process_data
from os.path import dirname, join
def selected_point(data,xcol,ycol,indx):
xval=copy(data[indx,xcol])
yval=copy(data[indx,ycol])
return xval,yval
def animate_update():
global indx,n
indx = slider.value + 1
if indx > (n-1):
indx = 0
slider.value = indx
#def slider_update(attrname, old, new):
# year = slider.value
# label.text = str(year)
# source.data = data[year]
#slider = Slider(start=years[0], end=years[-1], value=years[0], step=1, title="Year")
#def slider_callback2(src=datasrc,source=s2, window=None):
def slider_update(attrname, old, new):
global indx,s2
col_dict={"cv1":0,"cv2": 1,"index": 2,"energy": 3}
indx = slider.value
# label.text = str(indx)
xval,yval=selected_point(colvar,col_dict[xcol.value],col_dict[ycol.value],indx)
s = ColumnDataSource(data=dict(xs=[xval], ys=[yval]))
s2.data=s.data
def animate():
if button.label == '► Play':
button.label = '❚❚ Pause'
curdoc().add_periodic_callback(animate_update, 500)
else:
button.label = '► Play'
curdoc().remove_periodic_callback(animate_update)
def update(attr, old, new):
global indx,s2
col_dict={"cv1":0,"cv2": 1,"index": 2,"energy": 3}
p1,p2,slider = create_plot(colvar,col_dict[xcol.value],col_dict[ycol.value],col_dict[ccol.value],plt_name.value)
xval,yval=selected_point(colvar,col_dict[xcol.value],col_dict[ycol.value],indx)
s = ColumnDataSource(data=dict(xs=[xval], ys=[yval]))
s2.data=s.data
p1.circle('xs', 'ys', source=s2, fill_alpha=0.9, fill_color="blue",line_color='black',line_width=1, size=8,name="mycircle")
button = Button(label='► Play', width=60)
button.on_click(animate)
lay.children[1] = row(p1,p2)
datafile=join(dirname(__file__), 'data', 'MAPbI.dat')
colvar=np.loadtxt(datafile) #dtype='float32')
n=len(colvar)
columns=["cv1","cv2","index","energy"]
col_dict={"cv1":0,"cv2": 1,"index": 2,"energy": 3}
xcol = Select(title='X-Axis', value='cv1', options=columns)
xcol.on_change('value', update)
ycol = Select(title='Y-Axis', value='cv2', options=columns)
ycol.on_change('value', update)
ccol = Select(title='Color', value='energy', options=columns)
ccol.on_change('value', update)
plt_name = Select(title='Palette', value='Magma256', options=["Magma256","Plasma256","Spectral6","Inferno256","Viridis256","Greys256"])
plt_name.on_change('value', update)
xm=widgetbox(xcol,width=100)
ym=widgetbox(ycol,width=100)
cm=widgetbox(ccol,width=100)
pm=widgetbox(plt_name,width=100)
#controls = row(xcol, ycol, ccol, plt_name),width=600)
controls = row(xm, ym, cm, pm)
button = Button(label='► Play', width=60)
button.on_click(animate)
indx=0
xval,yval=selected_point(colvar,col_dict[xcol.value],col_dict[ycol.value],indx)
s2 = ColumnDataSource(data=dict(xs=[xval], ys=[yval]))
p1,p2,slider= create_plot(colvar,col_dict[xcol.value],col_dict[ycol.value],col_dict[ccol.value],plt_name.value)
p1.circle('xs', 'ys', source=s2, fill_alpha=0.9, fill_color="blue",line_color='black',line_width=1, size=8,name="mycircle")
#p1.circle('xs', 'ys', source=s2, fill_alpha=1, fill_color="black", size=10,name="mycircle")
slider.on_change('value', slider_update)
#plots=column(row(p1,p2),row(s,Spacer(width=20, height=30))) #,button)
slide=widgetbox(slider,width=600)
lay = layout([
[controls],
[p1,p2],
[slider, button],
], sizing_mode='fixed')
curdoc().add_root(lay)
curdoc().template_variables["js_files"] = ["plot-server/static/jmol/JSmol.min.js"]
curdoc().title = "Sketchmap"
| mit |
yukke42/machine-learning | 2/p30.py | 1 | 2222 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
class Perceptron(object):
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
"""
parameters
# X.shape = [n_samples, n_features]
# y.shape = [n_samples]
return
# object
"""
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
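# Perceptron learning rule: w_j <- w_j + eta * (y_i - y_hat_i) * x_ij,
# with the bias term (w_[0]) updated as if its input were 1.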
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
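# Note: fit() applies the classic perceptron rule delta_w = eta * (y - y_hat) * x, with the bias
# w_[0] updated by delta_w alone. Below, the model is trained on the Iris data using sepal length
# (column 0) and petal length (column 2) as the two features.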
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
y = df.iloc[0:150, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:150, [0,2]].values
ppn = Perceptron(eta=0.01, n_iter=10)
ppn.fit(X, y)
plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend(loc='upper left')
plt.show()
| mit |
mixturemodel-flow/tensorflow | tensorflow/contrib/timeseries/examples/predict_test.py | 80 | 2487 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
# Check that variance hasn't blown up too much. This is a relatively good
# indication that training was successful.
self.assertLess(upper_limit[-1] - lower_limit[-1],
1.5 * (upper_limit[0] - lower_limit[0]))
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertLess((upper_limit - lower_limit).mean(), 4.)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kveeramah/aDNA_GenoCaller | aDNA_GenoCaller.py | 1 | 25225 | #!/usr/bin/env python
# -*- coding: ASCII -*-
###This program calls genotypes from bam files at positions/regions specified in a bed file while taking into account post mortem damage as estimated by MapDamage.
###Otherwise the algorithm is the same as GATK UnifiedGenotyper for diploid calls
###This version of the program will take any genotype with a low quality heterozygote call (Q<30) and convert it to the next best homozygote call
###Three files are created:
###An emit-all vcf file noting the call for all base pairs in the bed file
###A vcf file with only those sites showing evidence for at least one alternative allele and that pass a predetermined QUAL filter
###A haploid emit-all vcf that gives the most likely base under a haploid model. If two or more bases are tied, the reported allele is randomly chosen.
###pysam and matplotlib need to be installed.
###to run type:
##aDNA_GenoCaller <indexed bamfile> <bed file> <reference genome> <5C-T mapdamage file> <3G-A mapdamage file>
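###for example (illustrative file names only; the last two arguments are the 5' C>T and 3' G>A misincorporation frequency tables produced by MapDamage):
##aDNA_GenoCaller sample.rmdup.bam targets.bed reference.fa 5pCtoT_freq.txt 3pGtoA_freq.txt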
from sys import argv
import pysam
import math
import numpy as np
import string
import numpy.ma as ma
import random
import matplotlib
matplotlib.use('Agg')
from matplotlib import rcParams
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.optimize import fmin
from scipy.optimize import fminbound
from scipy.optimize import curve_fit
import time
from random import randint
filenamein=argv[1]
filenameinB=argv[2]
ref_file=argv[3]
mapdamageCT=argv[4]
mapdamageGA=argv[5]
min_RD=1
MQ=15
BQ=15
GQ=30
QUALpass=30
theta=0.001
if filenamein[-4:] == '.bam':
filenameout=string.split(filenamein,'.bam')[0]
else:
filenameout=filenamein[:]
if '/' in filenamein:
filenameout=string.split(filenameout,'/')[-1]
plotfile=filenameout+'.'+filenameinB+'.aDNA.expMDfit_WB.pdf'
MDfile=filenameout+'.'+filenameinB+'.aDNA.fitted_model_params_WB'
filenameout1=filenameout+'.'+filenameinB+'.aDNA.emit_all.vcf'
filenameout2=filenameout+'.'+filenameinB+'.aDNA.vcf'
filenameout3=filenameout+'.'+filenameinB+'.aDNA.haploid.emit_all.vcf_like'
def phred2prob(x):
return 10.0**(-x/10.0)
def prob2phred(x):
return -10*math.log10(x)
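#For reference, a Phred quality of 20 corresponds to an error probability of 0.01 and 30 to 0.001;
#phred2prob and prob2phred convert between the two representations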
#exponential function
def exp_fit(x,a,b,c):
return a*np.exp(-x*b)+c
#stretched exponential (Weibull) function
def weibull_fit(x,a,b,c):
return a*np.exp(-(x**c)*b)
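#Both functions model the MapDamage misincorporation frequency as a function of distance from the read end.
#With purely illustrative coefficients a=0.3, b=0.4, c=1.0, weibull_fit gives ~0.20 at position 1 and
#~0.0001 at position 20, i.e. the damage signal decays rapidly into the read.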
#genotype caller that incorporates damage for aDNA
def geno_caller_10GT_aDNA(X):
GL=np.zeros(10) #all 10 possible genotypes and order = AA,AC,AG,AT,CC,CG,CT,GG,GT,TT
hap=np.zeros((len(X),4)) #all 4 haploid possibilities, A,C,G,T
all_dic={}
all_dic['A']=0
all_dic['C']=1
all_dic['G']=2
all_dic['T']=3
count=0
for g in range(len(X)):
if all_dic.has_key(X[g][0])==False:
continue
err=phred2prob(X[g][1])
hap[g]=err/3.0
if X[g][0]=='A':
hap[g][0]=1-err
hap[g][2]=((1-X[g][3])*(err/3.0))+(X[g][3]*(1-err))
elif X[g][0]=='C':
hap[g][1]= ((1-X[g][2])*(1-err))+(X[g][2]*(err/3.0))
elif X[g][0]=='G':
hap[g][2]= ((1-X[g][3])*(1-err))+(X[g][3]*(err/3.0))
elif X[g][0]=='T':
hap[g][3]=1-err
hap[g][1]=((1-X[g][2])*(err/3.0))+(X[g][2]*(1-err))
GL[0]=GL[0]+math.log10(hap[g][0])
GL[1]=GL[1]+math.log10((hap[g][0]+hap[g][1])/2)
GL[2]=GL[2]+math.log10((hap[g][0]+hap[g][2])/2)
GL[3]=GL[3]+math.log10((hap[g][0]+hap[g][3])/2)
GL[4]=GL[4]+math.log10(hap[g][1])
GL[5]=GL[5]+math.log10((hap[g][1]+hap[g][2])/2)
GL[6]=GL[6]+math.log10((hap[g][1]+hap[g][3])/2)
GL[7]=GL[7]+math.log10(hap[g][2])
GL[8]=GL[8]+math.log10((hap[g][2]+hap[g][3])/2)
GL[9]=GL[9]+math.log10(hap[g][3])
count+=1
if count==0:
GL.fill(-9)
return GL
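#Minimal usage sketch (hypothetical read data, for illustration only): each element of X is
#[base, base_quality, P(C>T damage) at that read position, P(G>A damage) at that read position], e.g.
#geno_caller_10GT_aDNA([['T', 30, 0.15, 0.0], ['C', 35, 0.02, 0.0]]) returns the 10 log10 genotype
#likelihoods in the order AA,AC,AG,AT,CC,CG,CT,GG,GT,TT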
#open up mapdamage files and put results in an array
fileCT=open(mapdamageCT,'r')
CTdata=fileCT.read()
fileCT.close()
CTdata=string.split(CTdata,'\n')
if CTdata[-1]=='':
del(CTdata[-1])
fileGA=open(mapdamageGA,'r')
GAdata=fileGA.read()
fileGA.close()
GAdata=string.split(GAdata,'\n')
if GAdata[-1]=='':
del(GAdata[-1])
X_CT=[]
X_GA=[]
Y_CT=[]
Y_GA=[]
for g in range(1,len(CTdata)):
k=string.split(CTdata[g],'\t')
X_CT.append(float(k[0]))
Y_CT.append(float(k[1]))
k=string.split(GAdata[g],'\t')
X_GA.append(float(k[0]))
Y_GA.append(float(k[1]))
X_CT=np.asarray(X_CT)
Y_CT=np.asarray(Y_CT)
X_GA=np.asarray(X_GA)
Y_GA=np.asarray(Y_GA)
try:
    #Perform curve fitting of MapDamage results to a stretched exponential (Weibull) function
print '\nFitting the following weibull model to the MapDamage data : a*exp(-(x^c)*b)'
fitParams_CT = curve_fit(weibull_fit, X_CT, Y_CT)
CTa=fitParams_CT[0][0]
CTb=fitParams_CT[0][1]
CTc=fitParams_CT[0][2]
fitParams_GA = curve_fit(weibull_fit, X_GA, Y_GA)
GAa=fitParams_GA[0][0]
GAb=fitParams_GA[0][1]
GAc=fitParams_GA[0][2]
out='Fit the following weibull model to the MapDamage data : a*exp(-(x^c)*b)\n\n'
out=out+'Fitted coefficients for CT changes:\na\t'+str(CTa)+'\nb\t'+str(CTb)+'\nc\t'+str(CTc)+'\n'
out=out+'\nFitted coefficients for GA changes:\na\t'+str(GAa)+'\nb\t'+str(GAb)+'\nc\t'+str(GAc)+'\n'
print '\n'+out
#print coefficient inference to file
fileMD=open(MDfile,'w')
fileMD.write(out)
fileMD.close()
print '\nWrote fitted coefficients to '+MDfile
#set up plot area
rcParams['figure.figsize'] = 10, 6
plt.ylabel('Substitution Frequency', fontsize = 16)
plt.xlabel('Read position', fontsize = 16)
plt.xlim(0,26)
#plot points and fitted curve
plt.plot(X_CT,Y_CT,'ro')
plt.plot(X_GA,Y_GA,'bo')
plt.plot(X_CT,weibull_fit(X_CT, fitParams_CT[0][0], fitParams_CT[0][1], fitParams_CT[0][2]),'r',ms=10,linewidth=2.0,label='C>T')
plt.plot(X_GA,weibull_fit(X_GA, fitParams_GA[0][0], fitParams_GA[0][1], fitParams_GA[0][2]),'b',ms=10,linewidth=2.0,label='G>A')
plt.legend()
# save plot to a file
plt.savefig(plotfile, bbox_inches=0, dpi=600)
plt.close()
print '\nPlotted MapDamage curve fit to '+plotfile
    #Make a reference list to determine how much to adjust a particular quality score given its read position (maxed at 300 here)
CT_decay=[]
GA_decay=[]
for g in range(300):
CTpoint=weibull_fit(g+1,CTa,CTb,CTc)-theta
GApoint=weibull_fit(g+1,GAa,GAb,GAc)-theta
if CTpoint<0.0:
CTpoint=0.0
if GApoint<0.0:
GApoint=0.0
CT_decay.append(CTpoint)
GA_decay.append(GApoint)
#in case weibull does not fit (occurs for low damage) use exponential fit
except:
#Perform curve fitting of MapDamage results to an exponential function
print '\nCould not fit weibull'
print '\nFitting the following exponential model to the MapDamage data : a*exp(-x*b)+c'
fitParams_CT = curve_fit(exp_fit, X_CT, Y_CT)
CTa=fitParams_CT[0][0]
CTb=fitParams_CT[0][1]
CTc=fitParams_CT[0][2]
fitParams_GA = curve_fit(exp_fit, X_GA, Y_GA)
GAa=fitParams_GA[0][0]
GAb=fitParams_GA[0][1]
GAc=fitParams_GA[0][2]
out='Fit the following exponential model to the MapDamage data : a*exp(-x*b)+c\n\n'
out=out+'Fitted coefficients for CT changes:\na\t'+str(CTa)+'\nb\t'+str(CTb)+'\nc\t'+str(CTc)+'\n'
out=out+'\nFitted coefficients for GA changes:\na\t'+str(GAa)+'\nb\t'+str(GAb)+'\nc\t'+str(GAc)+'\n'
print '\n'+out
#print coefficient inference to file
fileMD=open(MDfile,'w')
fileMD.write(out)
fileMD.close()
print '\nWrote fitted coefficients to '+MDfile
#set up plot area
rcParams['figure.figsize'] = 10, 6
plt.ylabel('Substitution Frequency', fontsize = 16)
plt.xlabel('Read position', fontsize = 16)
plt.xlim(0,26)
#plot points and fitted curve
plt.plot(X_CT,Y_CT,'ro')
plt.plot(X_GA,Y_GA,'bo')
plt.plot(X_CT,exp_fit(X_CT, fitParams_CT[0][0], fitParams_CT[0][1], fitParams_CT[0][2]),'r',ms=10,linewidth=2.0,label='C>T')
plt.plot(X_GA,exp_fit(X_GA, fitParams_GA[0][0], fitParams_GA[0][1], fitParams_GA[0][2]),'b',ms=10,linewidth=2.0,label='G>A')
plt.legend()
# save plot to a file
plt.savefig(plotfile, bbox_inches=0, dpi=600)
plt.close()
print '\nPlotted MapDamage curve fit to '+plotfile
    #Make a reference list to determine how much to adjust a particular quality score given its read position (maxed at 300 here)
CT_decay=[]
GA_decay=[]
for g in range(300):
CT_decay.append(exp_fit(g+1,CTa,CTb,CTc)-theta)
GA_decay.append(exp_fit(g+1,GAa,GAb,GAc)-theta)
###open up reference file
ref=pysam.FastaFile(ref_file)
###set up various look up dictionaries
all_dic={}
all_dic={}
all_dic['A']=0
all_dic['C']=1
all_dic['G']=2
all_dic['T']=3
all_dic['N']=-99
all_dic[0]='A'
all_dic[1]='C'
all_dic[2]='G'
all_dic[3]='T'
all_dic[-9]='.'
all_dic[-99]='N'
GL_dic={}
GL_dic['AA']=0
GL_dic['AC']=1
GL_dic['AG']=2
GL_dic['AT']=3
GL_dic['CC']=4
GL_dic['CG']=5
GL_dic['CT']=6
GL_dic['GG']=7
GL_dic['GT']=8
GL_dic['TT']=9
GL_dic[0]='AA'
GL_dic[1]='AC'
GL_dic[2]='AG'
GL_dic[3]='AT'
GL_dic[4]='CC'
GL_dic[5]='CG'
GL_dic[6]='CT'
GL_dic[7]='GG'
GL_dic[8]='GT'
GL_dic[9]='TT'
GL_dic[-9]='./.'
homs=[0,4,7,9]
alt_dic={}
alt_dic[0]=[0,0]
alt_dic[1]=[0,1]
alt_dic[2]=[0,2]
alt_dic[3]=[0,3]
alt_dic[4]=[1,1]
alt_dic[5]=[1,2]
alt_dic[6]=[1,3]
alt_dic[7]=[2,2]
alt_dic[8]=[2,3]
alt_dic[9]=[3,3]
tri_dic={}
tri_dic[1]='A,C'
tri_dic[2]='A,G'
tri_dic[3]='A,T'
tri_dic[5]='C,G'
tri_dic[6]='C,T'
tri_dic[8]='G,T'
LL0_map={}
LL0_map[0]=[0]
LL0_map[1]=[4]
LL0_map[2]=[7]
LL0_map[3]=[9]
LL1_map={}
LL1_map[0]=[1,2,3]
LL1_map[1]=[1,5,6]
LL1_map[2]=[2,5,8]
LL1_map[3]=[3,6,8]
LL2_map={}
LL2_map[0]=[4,5,6,7,8,9]
LL2_map[1]=[0,2,3,7,8,9]
LL2_map[2]=[0,1,3,4,6,9]
LL2_map[3]=[0,1,2,4,5,7]
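#For a reference base index r: LL0_map[r] holds the homozygous-reference genotype, LL1_map[r] the three
#heterozygous genotypes carrying the reference allele, and LL2_map[r] the six genotypes carrying no copy
#of the reference allele (used below to sum likelihoods over q=0, 1 and 2 alternative alleles)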
homs=[0,4,7,9]
###make a list of regions to be interrogated
file = open(filenameinB)
data=file.read()
data=string.split(data,'\n')
file.close()
if data[-1]=='':
del(data[-1])
SNPdir={}
SNPll={}
SNPlist=[]
count=0
for g in range(len(data)):
k=string.split(data[g])
key=k[0]+'_'+k[1]+'_'+k[2]
## chromo=k[0]
## start=int(k[1])
## end=int(k[2])
## seq=ref.fetch(chromo,start,end)
## seq=seq.upper()
SNPlist.append(key)
SNPdir[key]=k
###open up bam file (must be indexed)
samfile = pysam.AlignmentFile(filenamein, "rb")
samp_name=samfile.header['RG'][0]['SM']
count=0
###set up output files
fileout=open(filenameout1,'w')
fileout2=open(filenameout2,'w')
fileout3=open(filenameout3,'w')
###set up output headers
out1='##fileformat=VCFv4.1\n'
out2='##fileformat=VCFv4.1\n'
out3='##fileformat=Custom variant caller for_aDNA, emit all haploid\n'
outA='##Caller_arguments=<MappingQuality_filter='+str(MQ)+',BaseQuality_filter='+str(BQ)
outA=outA+',GenotypeQuality_filter='+str(GQ)+',minRD='+str(min_RD)
outA=outA+',bam_in='+filenamein+',bedfile='+filenameinB
outA=outA+',C>T_damagefile='+mapdamageCT
outA=outA+',G>A_damagefile='+mapdamageGA+'>\n'
outB='##INFO=<ID=AC,Number=A,Type=Integer,Description="Allele count in genotypes, for each ALT allele, in the same order as listed">\n'
outB=outB+'##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency, for each ALT allele, in the same order as listed">\n'
outB=outB+'##INFO=<ID=MQ,Number=1,Type=Float,Description="mean Mapping Quality (not RMS)">\n'
outB=outB+'##INFO=<ID=MQ0,Number=1,Type=Integer,Description="Total Mapping Quality Zero Reads">\n'
outB=outB+'##INFO=<ID=SGC,Number=1,Type=Integer,Description="low GQ heterozygote switched to best homozygote">\n'
outC='##Time created='+(time.strftime("%H:%M:%S"))+' '+(time.strftime("%d/%m/%Y"))+'\n'
for i in range(len(ref.lengths)):
outC=outC+'##contig=<ID='+ref.references[i]+',length='+str(ref.lengths[i])+'>\n'
outC=outC+'##reference=file:'+ref_file+'\n'
out_e='#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t'+samp_name+'\n'
out_f='#CHROM\tPOS\tREF\tCALL\tBEST_PROB\tALL_PROB\tAD\n'
fileout.write(out1+outA+outB+outC+out_e)
fileout2.write(out2+outA+outB+outC+out_e)
fileout3.write(out3+outA+outC+out_f)
###Precomputed priors for QUAL estimation
theta_prior=[0,theta/1,theta/2]
theta_prior[0]=1-sum(theta_prior)
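###theta_prior[q] is the prior probability that a site carries q=0,1,2 non-reference alleles, derived
###from the heterozygosity parameter theta (theta/q for q>0, with the remainder assigned to q=0)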
###Start running through regions in the bed
for gg in range(len(SNPlist)):
print 'Calculating genotype likelihoods for locus '+str(gg+1)+' : '+SNPlist[gg]
chromo=SNPdir[SNPlist[gg]][0]
pos_start=int(SNPdir[SNPlist[gg]][1])
pos_end=int(SNPdir[SNPlist[gg]][2])
    ###set up arrays to store results for a given bed entry (probably not too efficient for SNP data, more suited for long regions)
seq_len=pos_end-pos_start
GLs=np.zeros((seq_len,10),dtype='float32') ##Genotype likelihoods
PLs=np.zeros((seq_len,10),dtype='float32') ##Phred-scale normalized likelihoods
RDs=np.zeros((seq_len,4),dtype='int32') ##Read depth for each of the four bases
    GQs=np.zeros((seq_len),dtype='float64') ##Estimated genotype qualities
REFs=np.zeros((seq_len),dtype='int32') ##Reference allele index
GTs=np.zeros((seq_len),dtype='int32') ##Genotype index [order is AA,AC,AG,AT,CC,CG,CT,GG,GT,TT]
POS=np.zeros((seq_len),dtype='int32') ##1-based position
ALTs=np.zeros((seq_len),dtype='int32') ##Alternate allele index. If trinucleotide, store as 10
QUALs=np.zeros((seq_len),dtype='float32') ##Bayesian quality score
MQ0=np.zeros((seq_len),dtype='int32') ##number of reads with mapping 0
MQm=np.zeros((seq_len),dtype='float32') #mean mapping score
SEG=np.zeros((seq_len),dtype='int32') #hom ref, het with ref, hom alt, het without ref (tri)
SWGQ=np.zeros((seq_len),dtype='int32') #low GQhet switched to hom ref = 1
###make a list of positions
count=0
for ggg in range(pos_start,pos_end):
POS[count]=ggg+1
count+=1
###prepopulate arrays with -9s
GTs.fill(-9)
REFs.fill(-9)
ALTs.fill(-9)
SEG.fill(-9)
###Go base by base through each region in the bam file
for pileupcolumn in samfile.pileup(chromo,pos_start,pos_end,truncate=True,stepper='all'):
nucl=ref.fetch(chromo,pileupcolumn.pos,pileupcolumn.pos+1) ##grab reference sequence for region
nucl=nucl.upper()
var_list=[] ##a list to store read bases
map_list=[] ##a list to store mapping qualities
index=pileupcolumn.pos-pos_start ##tells us what position we are in for the arrays given the nucleotide position
        REFs[index]=all_dic[nucl] ##record the reference allele
###go read by read for each position
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip: ##ensure not an indel or duplicate
map_list.append(pileupread.alignment.mapping_quality) ##record mapping quality of read
if (pileupread.alignment.mapping_quality>=MQ) and (ord(pileupread.alignment.qual[pileupread.query_position])-33>=BQ) and (REFs[index]<>-99): ###Add base calls meeting the MQ and BQ filters
var_list.append([pileupread.alignment.query_sequence[pileupread.query_position],ord(pileupread.alignment.qual[pileupread.query_position])-33,CT_decay[pileupread.query_position],GA_decay[len(pileupread.alignment.query_sequence)-1-pileupread.query_position]])
###rescale qualities that are greater than 40 to a max of 40.
for ggg in range(len(var_list)):
if var_list[ggg][1]>40:
var_list[ggg][1]=40
###record read depth for each basetype
if len(var_list)>0:
all_list=list(zip(*var_list)[0])
RDs[index]=[all_list.count('A'),all_list.count('C'),all_list.count('G'),all_list.count('T')]
###work out MQ0 and mean mapping quality for the snp (not RMSE)
MQ0[index]=map_list.count(0)
try:
MQm[index]=sum(map_list)/float(len(map_list))
except:
MQm[index]=0.0
###if minimum read depth is met, try calling the genotype
if len(var_list)>=min_RD:
GLs[index]=geno_caller_10GT_aDNA(var_list) ##ancient DNA aware calculation of genotype likelihoods
GTs[index]=np.argmax(GLs[index]) ##record best genotype
PLs[index]=(GLs[index]-np.max(GLs[index]))*-10 ###calculte Phred-scale values
GQs[index]=np.msort(PLs[index])[1] ###record genotype quality
            ###if GQ is less than the threshold and the site is heterozygous, switch to the best homozygous genotype
if (alt_dic[GTs[index]][0]<>alt_dic[GTs[index]][1]) and (GQs[index]<GQ):
GTs[index]=homs[np.argmax(GLs[index][homs])]
SWGQ[index]=1
            ###Work out Bayesian QUALity score
## LL_0=np.sum(10**GLs[index][LL0_map[REFs[index]]])
## LL_1=np.sum(10**GLs[index][LL1_map[REFs[index]]])
## LL_2=np.sum(10**GLs[index][LL2_map[REFs[index]]])
## norconst1=sum([LL_0,LL_1,LL_2]) ###Depristo says this, but it really should be multiplied by the prior
## norconst2=sum([LL_0*theta_prior[0],LL_1*theta_prior[1],LL_2*theta_prior[2]]) ###Depristo says this, but it really should be multiplied by the prior
##
## Pr0=(theta_prior[0]*LL_0)/norconst2
## Pr1=(theta_prior[1]*LL_1)/norconst2
## Pr2=(theta_prior[2]*LL_2)/norconst2
            ###this new version adds a constant to the log10 likelihoods to avoid underflow
LL_0=np.sum(10**(GLs[index][LL0_map[REFs[index]]]-np.max(GLs[index]))) ##total likelihood for q=0|X
LL_1=np.sum(10**(GLs[index][LL1_map[REFs[index]]]-np.max(GLs[index]))) ##total likelihood for q=1|X, three different genotypes summed
LL_2=np.sum(10**(GLs[index][LL2_map[REFs[index]]]-np.max(GLs[index]))) ##total likelihood for q=2|X, six different genotypes summed
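            ###subtracting np.max(GLs[index]) from every log10 likelihood before exponentiating prevents underflow;
            ###this common factor cancels when the posterior probabilities are normalised below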
###calculate normalizing constant for bayes formula
## norconst1=sum([LL_0,LL_1,LL_2]) ###Depristo says this, but it really should be multiplied by the prior
norconst2=sum([LL_0*theta_prior[0],LL_1*theta_prior[1],LL_2*theta_prior[2]]) ###Depristo says this, but it really should be multiplied by the prior
            ###calculate posterior probabilities for q=0,1 and 2
Pr0=(theta_prior[0]*LL_0)/norconst2
Pr1=(theta_prior[1]*LL_1)/norconst2
Pr2=(theta_prior[2]*LL_2)/norconst2
            ###work out QUAL. If the hom-ref genotype is the best call, derive QUAL from the probability that q is not 0; otherwise derive it from the probability that q equals 0
if LL0_map[REFs[index]][0]==GTs[index]:
try:
QUALs[index]=-10*math.log10(1-Pr0)
except:
QUALs[index]=100.0 #add hoc solution to deal with extremely small ref homozygote probability
SEG[index]=0
else:
try:
QUALs[index]=-10*math.log10(Pr0)
except:
QUALs[index]=1000.0 #add hoc solution to deal with extremely small ref homozygote probability
            ###Work out what the alternate allele is, and whether there is in fact a trinucleotide (two alternate alleles) too
if REFs[index]==alt_dic[GTs[index]][0]: ##homo,alt heterozygote
ALTs[index]=alt_dic[GTs[index]][1]
SEG[index]=1
elif REFs[index]==alt_dic[GTs[index]][1]: ##homo,alt heterozygote
ALTs[index]=alt_dic[GTs[index]][0]
SEG[index]=1
elif alt_dic[GTs[index]][0]==alt_dic[GTs[index]][1]: #homozygote alternate
ALTs[index]=alt_dic[GTs[index]][0]
SEG[index]=2
else: #must be trinucleotide
ALTs[index]=10
SEG[index]=3
###start writing calls to vcf file
for ggg in range(len(GLs)):
LQ=0 ##indicator for a low quality site
out=chromo+'\t'+str(POS[ggg])+'\t.\t'+all_dic[REFs[ggg]]+'\t'
if ALTs[ggg]<10:
out=out+all_dic[ALTs[ggg]] #write none or single alternate allele
else:
out=out+tri_dic[GTs[ggg]] #write trinucleotide alleles
        if SEG[ggg]<>-9: ###this site was callable, even if QUAL was low
out=out+'\t'+str(round(QUALs[ggg],2))+'\t'
###indicate whether site had low QUAL score or not
if QUALs[ggg]>=QUALpass:
out=out+'PASS\t'
LQ=1
else:
out=out+'low_quality\t'
###Info tag information, alternate allele count, frequency, as well as mapping qualities
if SEG[ggg]==0:
out=out+'AC=0;AF=0.0;MQ='+str(round(MQm[ggg],2))+';MQ0='+str(MQ0[ggg])
elif SEG[ggg]==1:
out=out+'AC=1;AF=0.5;MQ='+str(round(MQm[ggg],2))+';MQ0='+str(MQ0[ggg])
elif SEG[ggg]==2:
out=out+'AC=2;AF=1.0;MQ='+str(round(MQm[ggg],2))+';MQ0='+str(MQ0[ggg])
elif SEG[ggg]==3:
out=out+'AC=1,1;AF=0.5,0.5;MQ='+str(round(MQm[ggg],2))+';MQ0='+str(MQ0[ggg])
###Make a note that heterozygote call was converted to homozygous call
if SWGQ[ggg]==1:
out=out+';SGC='+GL_dic[np.argmax(GLs[ggg])]
if SEG[ggg]==0: #site is homozygous reference, just note, GT,DP and GQ
out=out+'\tGT:DP:GQ:PL\t0/0:'+str(RDs[ggg][REFs[ggg]])+':'
if GQs[ggg]>99:
out=out+'99:'
else:
out=out+str(int(round(GQs[ggg])))+':'
##output all 10 PL values. Note the order is not the same as GATK
for gggg in range(len(PLs[ggg])):
out=out+str(int(round(PLs[ggg][gggg])))+','
out=out[:-1]+'\n'
else: #Site is heterozygous so note more information
out=out+'\tGT:AD:DP:GQ:PL\t'
if SEG[ggg]<3: #there is only one alternate allele
if SEG[ggg]==1:
out=out+'0/1:'
elif SEG[ggg]==2:
out=out+'1/1:'
out=out+str(RDs[ggg][REFs[ggg]])+','+str(RDs[ggg][ALTs[ggg]])+':'+str(np.sum(RDs[ggg]))+':'
else: #there are two alternate alleles
out=out+'1/2:'
out=out+str(RDs[ggg][REFs[ggg]])+','+str(RDs[ggg][alt_dic[GTs[ggg]][0]])+','+str(RDs[ggg][alt_dic[GTs[ggg]][1]])+':'+str(np.sum(RDs[ggg]))+':'
##cap reported GQ score to 99
if GQs[ggg]>99:
out=out+'99:'
else:
out=out+str(int(round(GQs[ggg])))+':'
##output all 10 PL values. Note the order is not the same as GATK
for gggg in range(len(PLs[ggg])):
out=out+str(int(round(PLs[ggg][gggg])))+','
out=out[:-1]+'\n'
        else: ###Site had no data, i.e. usable read depth 0
out=out+'\t.\t.\t.\t.\t./.\n'
fileout.write(out) ##write to emit all
if (SEG[ggg]>=1) and (LQ==1):
fileout2.write(out) ##write to variable only vcf that passes QUAL threshold
        ###Bayesian best haploid call
out_h=chromo+'\t'+str(POS[ggg])+'\t'+all_dic[REFs[ggg]]+'\t'
if SEG[ggg]>=0:
            ###Use GLs for homozygous genotypes to calculate the probability of A,C,G,T. Reference unaware, so the prior probability is equal (0.25) for all four bases
norm_const3=np.sum(10**(GLs[ggg][homs]-np.max(GLs[ggg][homs]))*0.25)
post=(10**(GLs[ggg][homs]-np.max(GLs[ggg][homs]))*0.25)/norm_const3
##this section checks if the highest posterior is unique or found for multiple bases
max_val=np.max(post)
matches=[]
post_str=''
for gggg in range(len(post)):
if post[gggg]==max_val:
matches.append(gggg)
post_str=post_str+str(round(post[gggg],3))+','
            ##if max val is 0.25, there is no information at this site, so set as N
if max_val<=0.25:
out_h=out_h+'N\t0.25\t0.25,0.25,0.25,0.25'
else:
if len(matches)==1: ##get the base with the highest LL
out_h=out_h+all_dic[matches[0]]+'\t'+str(round(post[matches[0]],3))+'\t'+post_str[:-1]
else:
rand_allele=randint(0,len(matches)-1) ##if two or more bases are tied for best likelihood, randomly choose one
out_h=out_h+all_dic[matches[rand_allele]]+'\t'+str(round(post[matches[rand_allele]],3))+'\t'+post_str[:-1]
out_h=out_h+'\t'+str(RDs[ggg][0])+','+str(RDs[ggg][1])+','+str(RDs[ggg][2])+','+str(RDs[ggg][3])+'\n'
else:
out_h=out_h+'N\t0.25\t0.25,0.25,0.25,0.25\t0,0,0,0\n'
fileout3.write(out_h)
fileout.close()
fileout2.close()
fileout3.close()
| gpl-3.0 |
djgagne/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
christobal54/aei-grad-school | bin/ebv-scales-growth-hist.py | 1 | 3965 | #!/usr/bin/python
#####
# plots distributions of growth EBV variables
#####
import aei
import gdal
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap as lsc
from matplotlib.colors import ColorConverter as cc
from scipy.stats import gaussian_kde
# set base directory for files
base = '/home/cba/Downloads/scale-figures/'
# file to read
file = base + 'CR-GROWTH-stack4.tif'
# set the output file
ofile = base + 'CR-histograms.tif'
# band names
names = ['TreeCover', 'NIRv', 'Temperature', 'Insolation', 'CloudCover']
titles = ['Tree Cover (%)', 'NIRv (unitless)', 'Temperature (C)',
'Solar Insolation (kWh / m2)', 'Annual Cloud Cover (%)']
cor_titles = ['Tree Cover\n(%)', 'NIRv\n(unitless)', 'Temperature\n(C)',
'Solar Insolation\n(kWh / m2)', 'Annual Cloud Cover (%)']
# create color maps for each plot
tree_cmap = lsc.from_list('tree_cmap', [(0, cc.to_rgba('#cc78a7')), (1, cc.to_rgba('#06de37'))])
nirv_cmap = lsc.from_list('nirv_cmap', [(0, cc.to_rgba('#e74726')), (1, cc.to_rgba('#5db3e5'))])
temp_cmap = lsc.from_list('temp_cmap', [(0, cc.to_rgba('#0773b3')), (1, cc.to_rgba('#f1e545'))])
insl_cmap = lsc.from_list('insl_cmap', [(0, cc.to_rgba('#0d0887')),
(0.5, cc.to_rgba('#da5b69')), (1, cc.to_rgba('#f0f921'))])
cmap = [tree_cmap, nirv_cmap, temp_cmap, insl_cmap]
# read the data reference
ref = gdal.Open(file)
ndval = 0
data = ref.ReadAsArray()
# get the no data locations
gd = np.where(data[0,:,:] != ndval)
# loop through each band and plot a density distribution
plt.figure(1)
counter = 0
for i in [3,0,1,2]:
# subset to a 1d array
band_data = data[i,gd[0], gd[1]]
# find min/max for plot bounds
xmin = np.percentile(band_data, 2)
xmax = np.percentile(band_data, 98)
# create ticks to plot
xticks = np.around(np.arange(0,1.25, 0.25) * (xmax - xmin) + xmin, 2)
# set custom covariance to smooth peaks
covar = 0.5
dns = gaussian_kde(band_data)
if i == 2:
dns.covariance_factor = lambda : covar
dns._compute_covariance()
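        # overriding covariance_factor and recomputing is a common way to force a fixed KDE
        # bandwidth with scipy's gaussian_kde (done here only for the temperature band, i == 2)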
# set xscale to plot
npts = 100
xs = np.linspace(xmin, xmax, npts)
# create a normalizing function for color filling
norm = matplotlib.colors.Normalize(vmin=xmin, vmax=xmax)
# set a vertical structure for this plot format
plt.subplot(411 + counter)
# calculate the lines to plot
ydata = dns(xs)
# plot the function
plt.plot(xs, dns(xs), color = 'black', lw = 3)
plt.xlabel(titles[i])
plt.yticks([], [])
plt.xticks(xticks)
plt.ylabel('%')
# add the fill point by point
for j in range(npts-1):
plt.fill_between([xs[j], xs[j+1]], [ydata[j], ydata[j+1]], color = cmap[i](norm(xs[j])))
counter += 1
# save the final figure
plt.tight_layout()
plt.savefig(ofile, dpi = 300)
# calculate correlation coefficients between variables
cor = np.corrcoef(data[0:4, gd[0], gd[1]])
arr = data[0:4, gd[0], gd[1]]
# create heat maps using hexbin to plot correlations between each variable
ctable = 'plasma'
#ctable = 'copper'
stanford_cmap = temp_cmap = lsc.from_list('stanford_cmap', [(0, cc.to_rgba("#000000")), (1, cc.to_rgba("#8C1515"))])
plt.figure(2)
# set up the plots to use
#plots = [[0,0], [0,1], [0,2], [0,3], [1,1], [1,2], [1,3], [2,2], [2,3], [3,3]]
plots = [[0,1], [0,2], [0,3], [1,2], [1,3], [2,3]]
for plot in plots:
#plt.subplot(441)
#plt.subplot(4, 4, 1 + plot[0] + (plot[1] * 4))
plt.subplot(3, 3, 1 + plot[0] + ((plot[1]-1) * 3))
plt.hexbin(arr[plot[0]], arr[plot[1]], gridsize = 20, cmap = ctable,
alpha = 0.95, linewidths = 0, bins = 'log')
# label the correlation
plt.title("pearson's r: {:0.2f}".format(cor[plot[0], plot[1]]))
# label the axes
if plot[0] == 0:
plt.ylabel(cor_titles[plot[1]])
if plot[1] == 3:
plt.xlabel(cor_titles[plot[0]])
| mit |
vybstat/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standard estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
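# Minimal usage sketch of the robust estimator described above (illustrative only; the benchmark
# below compares it against the empirical estimate under increasing contamination):
#
#     from sklearn.covariance import MinCovDet
#     mcd = MinCovDet().fit(X)            # X: array of shape (n_samples, n_features)
#     mcd.location_, mcd.covariance_      # robust location and covariance estimates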
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
YihaoLu/statsmodels | docs/sphinxext/numpy_ext/docscrape_sphinx.py | 62 | 7703 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
katstalk/android_external_chromium_org | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 26 | 11131 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
# By default, use the version of Python is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
raise Exception('Cannot find a chome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| bsd-3-clause |
endolith/scikit-image | doc/examples/segmentation/plot_rag.py | 25 | 2139 | """
=======================
Region Adjacency Graphs
=======================
This example demonstrates the use of the `merge_nodes` function of a Region
Adjacency Graph (RAG). The `RAG` class represents an undirected weighted graph
which inherits from `networkx.graph` class. When a new node is formed by
merging two nodes, the edge weight of all the edges incident on the resulting
node can be updated by a user defined function `weight_func`.
The default behaviour is to use the smaller edge weight in case of a conflict.
The example below also shows how to use a custom function to select the larger
weight instead.
"""
from skimage.future.graph import rag
import networkx as nx
from matplotlib import pyplot as plt
import numpy as np
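# The demonstration below builds a 4-node cycle with one chord, assigns dummy labels, and merges
# nodes 1 and 3 twice: once with the default rule (keep the smaller weight) and once with the
# custom `max_edge` callback, displaying the resulting graphs for comparison.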
def max_edge(g, src, dst, n):
"""Callback to handle merging nodes by choosing maximum weight.
Returns either the weight between (`src`, `n`) or (`dst`, `n`)
in `g` or the maximum of the two when both exist.
Parameters
----------
g : RAG
The graph under consideration.
src, dst : int
The vertices in `g` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
weight : float
The weight between (`src`, `n`) or (`dst`, `n`) in `g` or the
maximum of the two when both exist.
"""
w1 = g[n].get(src, {'weight': -np.inf})['weight']
w2 = g[n].get(dst, {'weight': -np.inf})['weight']
return max(w1, w2)
def display(g, title):
"""Displays a graph with the given title."""
pos = nx.circular_layout(g)
plt.figure()
plt.title(title)
nx.draw(g, pos)
nx.draw_networkx_edge_labels(g, pos, font_size=20)
g = rag.RAG()
g.add_edge(1, 2, weight=10)
g.add_edge(2, 3, weight=20)
g.add_edge(3, 4, weight=30)
g.add_edge(4, 1, weight=40)
g.add_edge(1, 3, weight=50)
# Assigning dummy labels.
for n in g.nodes():
g.node[n]['labels'] = [n]
gc = g.copy()
display(g, "Original Graph")
g.merge_nodes(1, 3)
display(g, "Merged with default (min)")
gc.merge_nodes(1, 3, weight_func=max_edge, in_place=False)
display(gc, "Merged with max without in_place")
plt.show()
| bsd-3-clause |
genehughes/trading-with-python | nautilus/nautilus.py | 77 | 5403 | '''
Created on 26 dec. 2011
Copyright: Jev Kuznetsov
License: BSD
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ib.ext.Contract import Contract
from ib.opt import ibConnection
from ib.ext.Order import Order
import tradingWithPython.lib.logger as logger
from tradingWithPython.lib.eventSystem import Sender, ExampleListener
import tradingWithPython.lib.qtpandas as qtpandas
import numpy as np
import pandas
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
class PriceListener(qtpandas.DataFrameModel):
def __init__(self):
super(PriceListener,self).__init__()
self._header = ['position','bid','ask','last']
def addSymbol(self,symbol):
data = dict(zip(self._header,[0,np.nan,np.nan,np.nan]))
row = pandas.DataFrame(data, index = pandas.Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
def priceHandler(self,sender,event,msg=None):
if msg['symbol'] not in self.df.index:
self.addSymbol(msg['symbol'])
if msg['type'] in self._header:
self.df.ix[msg['symbol'],msg['type']] = msg['price']
self.signalUpdate()
#print self.df
class Broker(Sender):
def __init__(self, name = "broker"):
super(Broker,self).__init__()
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self._id2symbol = {} # id-> symbol dict
self.tws = None
self._nextId = 1 # tws subscription id
self.nextValidOrderId = None
def connect(self):
""" connect to tws """
self.tws = ibConnection() # tws interface
self.tws.registerAll(self._defaultHandler)
self.tws.register(self._nextValidIdHandler,'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True,'')
self.tws.register(self._priceHandler,'TickPrice')
def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'):
''' subscribe to stock data '''
self.log.debug('Subscribing to '+symbol)
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self._nextId
self._nextId += 1
self.tws.reqMktData(subId,c,'',False)
self._id2symbol[subId] = c.m_symbol
self.contracts[symbol]=c
def disconnect(self):
self.tws.disconnect()
#------event handlers--------------------
def _defaultHandler(self,msg):
''' default message handler '''
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def _nextValidIdHandler(self,msg):
self.nextValidOrderId = msg.orderId
self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId))
def _priceHandler(self,msg):
#translate to meaningful messages
message = {'symbol':self._id2symbol[msg.tickerId],
'price':msg.price,
'type':priceTicks[msg.field]}
self.dispatch('price',message)
#-----------------GUI elements-------------------------
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("print selected rows")
Action.triggered.connect(self.printName)
menu.exec_(event.globalPos())
def printName(self):
print "Action triggered from " + self.name
print 'Selected :'
for idx in self.selectionModel().selectedRows():
print self.model().df.ix[idx.row(),:]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
self.broker = Broker()
self.price = PriceListener()
self.broker.connect()
symbols = ['SPY','XLE','QQQ','VXX','XIV']
for symbol in symbols:
self.broker.subscribeStk(symbol)
self.broker.register(self.price.priceHandler, 'price')
widget = TableView(parent=self)
widget.setModel(self.price)
widget.horizontalHeader().setResizeMode(QHeaderView.Stretch)
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
def __del__(self):
print 'Disconnecting.'
self.broker.disconnect()
if __name__=="__main__":
print "Running nautilus"
import sys
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
print "All done." | bsd-3-clause |
ua-snap/downscale | snap_scripts/epscor_sc/testing_deltadownscale_deficiency_tasmin_tasmax_epscor_sc_CLIMtest.py | 1 | 7419 | # SLICE YEARS AND CROP TO THE BASE EXTENT OF AKCAN IN WGS84 PCLL
def transform_from_latlon( lat, lon ):
''' simple way to make an affine transform from lats and lons coords '''
from affine import Affine
lat = np.asarray( lat )
lon = np.asarray( lon )
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
if __name__ == '__main__':
import matplotlib
matplotlib.use( 'agg' )
from matplotlib import pyplot as plt
import os, rasterio
from rasterio import crs
import xarray as xr
import numpy as np
import pandas as pd
import geopandas as gpd
filelist = [ '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/cmip5/prepped/IPSL-CM5A-LR/rcp85/tasmin/tasmin_IPSL-CM5A-LR_rcp85_r1i1p1_2006_2100.nc',
'/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/cmip5/prepped/IPSL-CM5A-LR/rcp85/tasmax/tasmax_IPSL-CM5A-LR_rcp85_r1i1p1_2006_2100.nc',
'/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/cmip5/prepped/IPSL-CM5A-LR/rcp85/tas/tas_IPSL-CM5A-LR_rcp85_r1i1p1_2006_2100.nc' ]
historicals = [ '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/cmip5/prepped/IPSL-CM5A-LR/historical/tasmin/tasmin_IPSL-CM5A-LR_historical_r1i1p1_1860_2005.nc',
'/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/cmip5/prepped/IPSL-CM5A-LR/historical/tasmax/tasmax_IPSL-CM5A-LR_historical_r1i1p1_1860_2005.nc',
'/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/cmip5/prepped/IPSL-CM5A-LR/historical/tas/tas_IPSL-CM5A-LR_historical_r1i1p1_1860_2005.nc' ]
variables = ['tasmin', 'tasmax', 'tas']
output_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_test_ipsl/IPSL-CM5A-LR_raw/rcp85'
begin = '2096'
end = '2096'
model = 'IPSL-CM5A-LR'
scenario = 'rcp85'
figsize = (16,9)
# pacific-centered reference system 4326
pacific = "+proj=longlat +ellps=WGS84 +pm=-180 +datum=WGS84 +no_defs"
shp_fn = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/SCTC_studyarea/Kenai_StudyArea.shp'
shp = gpd.read_file( shp_fn )
shp_pacific = shp.to_crs( pacific )
bounds = shp_pacific.bounds
raw_dict = {}
anom_dict = {}
for variable, fn, fn_hist in zip( variables, filelist, historicals ):
print( 'running: {}'.format( variable ) )
ds = xr.open_dataset( fn )
ds = ds[ variable ].sel( time=slice( begin, end ) )
ds = ds - 273.15
time_suffix = [ t.strftime('%m_%Y') for t in ds.time.to_pandas() ]
# get the historicals for climatology
ds_hist = xr.open_dataset( fn_hist )
ds_hist = ds_hist[ variable ].sel( time=slice( '01-1961', '12-1990' ) )
ds_hist = ds_hist - 273.15
climatology = ds_hist.groupby( 'time.month' ).mean( axis=0 )
# create anomalies
anomalies = ds.groupby( 'time.month' ) - climatology
count, height, width = climatology.shape
affine = transform_from_latlon( ds.lat, ds.lon )
# can we window the data here?
# this will involve generating a window object using rasterio and extracting using slicing.
meta = { 'crs':crs.from_string( pacific ),
'affine':affine,
'dtype':'float64',
'driver':'GTiff',
'height': height,
'width': width,
'count':count }
# write out the climatologies
# # # # CLIMATOLOGY OUTPUT
#out_fn = ''
out_fn = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_test_ipsl/IPSL-CM5A-LR/'+ variable +'_IPSL-CM5A-LR_rcp85_clim.tif'
with rasterio.open( out_fn, 'w', **meta ) as rst:
window = rst.window( *bounds.as_matrix().ravel().tolist() )
rst.write( climatology.data )
# read it back from window
new_affine = rst.window_transform( window )
raw_arr = rst.read( window=window )
# same for anom arr
rst.write( anomalies )
clim_arr = rst.read( window=window )
out_fn = ''
with rasterio.open( out_fn, 'w', **meta ) as rst:
window = rst.window( *bounds.as_matrix().ravel().tolist() )
rst.write( ds.data )
# read it back from window
new_affine = rst.window_transform( window )
raw_arr = rst.read( window=window )
# same for anom arr
rst.write( anomalies )
anom_arr = rst.read( window=window )
rst = None
count, height, width = raw_arr.shape
meta.update( height=height, width=width, affine=new_affine, driver='GTiff' )
# write out raw arr
out_fn = os.path.join( output_path, '{}_IPSL_CM5A_raw_data_{}_epscor_sc_{}_{}.tif'.format( variable,'rcp85',begin, end ))
dirname = os.path.dirname( out_fn )
if not os.path.exists( dirname ):
os.makedirs( dirname )
with rasterio.open( out_fn, 'w', **meta ) as out:
out.write( raw_arr )
# write out anom_arr
out_fn = os.path.join( output_path, '{}_IPSL_CM5A_raw_anom_data_{}_epscor_sc_{}_{}.tif'.format( variable,'rcp85',begin, end ))
dirname = os.path.dirname( out_fn )
if not os.path.exists( dirname ):
os.makedirs( dirname )
with rasterio.open( out_fn, 'w', **meta ) as out:
out.write( anom_arr )
raw_dict[ variable ] = raw_arr.mean( axis=(1,2) )
anom_dict[ variable ] = anom_arr.mean( axis=(1,2) )
# plot the above data in groups showing the raw values and the anomalies
df_raw = pd.DataFrame( raw_dict )
df_anom = pd.DataFrame( anom_dict )
col_list = ['tasmax', 'tas', 'tasmin']
df_raw = df_raw[ col_list ]
df_raw.index = pd.date_range('2096', '2097', freq='M')
df_anom = df_anom[ col_list ]
df_anom.index = pd.date_range('2096', '2097', freq='M')
# RAW FIRST
# now plot the dataframe
if begin == end:
title = 'EPSCoR SC AOI Temp Mean {} {} {}'.format( model, scenario, begin )
else:
title = 'EPSCoR SC AOI Temp Mean {} {} {} - {}'.format( model, scenario, begin, end )
if 'tas' in variables:
colors = ['red', 'black', 'blue' ]
else:
colors = [ 'blue', 'black' ]
ax = df_raw.plot( kind='line', title=title, figsize=figsize, color=colors )
output_dir = os.path.join( output_path, 'test_min_max_issue' )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
out_metric_fn = 'temps'
if 'pr' in variables:
out_metric_fn = 'prec'
if begin == end:
output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_{}_{}_{}.png'.format( out_metric_fn, model, scenario, begin ) )
else:
output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_{}_{}_{}_{}.png'.format( out_metric_fn, model, scenario, begin, end ) )
plt.savefig( output_filename, dpi=700 )
plt.close()
# ANOM NEXT
# now plot the dataframe
if begin == end:
title = 'EPSCoR SC AOI Temp Mean Anomalies {} {} {}'.format( model, scenario, begin )
else:
title = 'EPSCoR SC AOI Temp Mean Anomalies {} {} {} - {}'.format( model, scenario, begin, end )
if 'tas' in variables:
colors = ['red', 'black', 'blue' ]
else:
colors = [ 'blue', 'black' ]
ax = df_anom.plot( kind='line', title=title, figsize=figsize, color=colors )
output_dir = os.path.join( output_path, 'test_min_max_issue' )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
# now plot the dataframe
out_metric_fn = 'temps'
if 'pr' in variables:
out_metric_fn = 'prec'
if begin == end:
output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_anom_{}_{}_{}.png'.format( out_metric_fn, model, scenario, begin ) )
else:
output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_anom_{}_{}_{}_{}.png'.format( out_metric_fn, model, scenario, begin, end ) )
plt.savefig( output_filename, dpi=700 )
plt.close()
| mit |
brodoll/sms-tools | lectures/06-Harmonic-model/plots-code/f0Twm-piano.py | 19 | 1261 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackman
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import stft as STFT
import sineModel as SM
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/piano.wav')
w = np.blackman(1501)
N = 2048
t = -90
minf0 = 100
maxf0 = 300
f0et = 1
maxnpeaksTwm = 4
H = 128
x1 = x[int(1.5*fs):int(1.8*fs)]
plt.figure(1, figsize=(9, 7))
mX, pX = STFT.stftAnal(x, fs, w, N, H)
f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et)
f0 = UF.cleaningTrack(f0, 5)
yf0 = UF.sinewaveSynth(f0, .8, H, fs)
f0[f0==0] = np.nan
maxplotfreq = 800.0
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:N*maxplotfreq/fs+1]))
plt.autoscale(tight=True)
plt.plot(frmTime, f0, linewidth=2, color='k')
plt.autoscale(tight=True)
plt.title('mX + f0 (piano.wav), TWM')
plt.tight_layout()
plt.savefig('f0Twm-piano.png')
UF.wavwrite(yf0, fs, 'f0Twm-piano.wav')
plt.show()
| agpl-3.0 |
smcantab/pele | pele/gui/graph_viewer.py | 5 | 13967 | import networkx as nx
import numpy as np
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QWidget
from pele.gui.ui.graph_view_ui import Ui_Form
from pele.utils.events import Signal
from pele.utils.disconnectivity_graph import database2graph
from pele.gui.ui.dgraph_dlg import minimum_energy_path, check_thermodynamic_info
from pele.rates import compute_committors
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class ShowPathAction(QtGui.QAction):
"""this action will show the minimum energy path to minimum1"""
def __init__(self, minimum1, minimum2, parent=None):
QtGui.QAction.__init__(self, "show path to %d" % minimum2._id, parent)
self.parent = parent
self.minimum1 = minimum1
self.minimum2 = minimum2
self.triggered.connect(self.__call__)
def __call__(self, val):
self.parent._show_minimum_energy_path(self.minimum1, self.minimum2)
class ColorByCommittorAction(QtGui.QAction):
"""this action will color the graph by committor probabilities"""
def __init__(self, minimum1, minimum2, parent=None):
QtGui.QAction.__init__(self, "color by committor %d" % minimum2._id, parent)
self.parent = parent
self.minimum1 = minimum1
self.minimum2 = minimum2
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QtGui.QInputDialog(parent=self.parent)
dialog.setLabelText("Temperature for committor calculation")
dialog.setInputMode(2)
dialog.setDoubleValue(1.)
dialog.exec_()
if dialog.result():
T = dialog.doubleValue()
self.parent._color_by_committor(self.minimum1, self.minimum2, T=T)
class GraphViewWidget(QWidget):
def __init__(self, database=None, parent=None, app=None, minima=None):
QWidget.__init__(self, parent=parent)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.database = database
self.minima = minima
self.app = app
self.canvas = self.ui.canvas.canvas
self.axes = self.canvas.axes
self.fig = self.canvas.fig
self.on_minima_picked = Signal()
self.from_minima = set()
self.positions = dict()
self.boundary_nodes = set()
self._selected_minimum = None
self._mpl_cid = None
self._minima_color_value = None
def on_btn_show_all_clicked(self, clicked=None):
if clicked is None: return
self.show_all()
def _reset_minima_lists(self):
self.from_minima.clear()
self.positions.clear()
self.boundary_nodes.clear()
self._minima_color_value = None
def show_all(self):
self.ui.label_status.setText("showing full graph")
self._reset_minima_lists()
self.make_graph()
self.show_graph()
def make_graph_from(self, minima, cutoff=1):
"""rebuild the graph using only the passed minima and those in self.from_minima"""
# cutoff += 1
self.from_minima.update(minima)
minima = self.from_minima
nodes = set()
# make a graph from the minima in self.minima and nearest neighbors
outer_layer = set()
for m in minima:
nodesdir = nx.single_source_shortest_path(self.full_graph, m, cutoff=cutoff)
for n, path in nodesdir.iteritems():
d = len(path) - 1
if d < cutoff:
# n is close to m, remove it from outer layer
outer_layer.discard(n)
elif d == cutoff:
if n not in nodes:
# n is in the outer layer of m and not near any other nodes.
outer_layer.add(n)
nodes.update(nodesdir)
self.boundary_nodes = outer_layer
self.graph = self.full_graph.subgraph(nodes)
# remove nodes not in the graph from the dictionary positions
difference = set(self.positions.viewkeys())
difference.difference_update(self.graph.nodes())
for m in difference:
self.positions.pop(m)
print "boundary nodes", len(self.boundary_nodes), self.graph.number_of_nodes()
def make_graph(self, database=None, minima=None):
"""build an nx graph from the database"""
if database is None:
database = self.database
if minima is None:
minima = self.minima
print "making graph", database, minima
# get the graph object, eliminate nodes without edges
graph = database2graph(database)
if minima is not None:
to_remove = set(graph.nodes()).difference(set(minima))
graph.remove_nodes_from(to_remove)
self.full_graph = graph
print graph.number_of_nodes()
degree = graph.degree()
nodes = [n for n, nedges in degree.items() if nedges > 0]
self.graph = graph.subgraph(nodes)
print self.graph.number_of_nodes(), self.graph.number_of_edges()
def _show_minimum_energy_path(self, m1, m2):
"""show only the minima in the path from m1 to m2"""
self._reset_minima_lists()
path = minimum_energy_path(self.full_graph, m1, m2)
self.make_graph_from(path)
print "there are", len(path), "minima in the path from", m1._id, "to", m2._id
status = "showing path from minimum %d to %d" % (m1._id, m2._id)
self.ui.label_status.setText(status)
self.show_graph()
def _color_by_committor(self, min1, min2, T=1.):
print "coloring by the probability that a trajectory gets to minimum", min1._id, "before", min2._id
# get a list of transition states in the same cluster as min1
edges = nx.bfs_edges(self.graph, min1)
transition_states = [ self.graph.get_edge_data(u, v)["ts"] for u, v in edges ]
if not check_thermodynamic_info(transition_states):
raise Exception("The thermodynamic information is not yet computed. Use the main menu of the gui under 'Actions'")
A = [min2]
B = [min1]
committors = compute_committors(transition_states, A, B, T=T)
self._minima_color_value = committors
def get_committor(m):
try:
return committors[m]
except KeyError:
return 1.
self._minima_color_value = get_committor
self.show_graph()
def _on_right_click_minimum(self, minimum):
"""create a menu with the list of available actions"""
print "you right clicked on minimum with id", minimum._id, "and energy", minimum.energy
menu = QtGui.QMenu("list menu", parent=self)
if self._selected_minimum is not None:
menu.addAction(ShowPathAction(minimum, self._selected_minimum, parent=self))
menu.addAction(ColorByCommittorAction(minimum, self._selected_minimum, parent=self))
menu.exec_(QtGui.QCursor.pos())
def _on_left_click_minimum(self, min1):
self._selected_minimum = min1
print "you clicked on minimum with id", min1._id, "and energy", min1.energy
self.on_minima_picked(min1)
if self.ui.checkBox_zoom.isChecked():
self.make_graph_from([min1])
text = "showing graph near minima "
for m in self.from_minima:
text += " " + str(m._id)
self.ui.label_status.setText(text)
self.show_graph()
def _on_mpl_pick_event(self, event):
"""matplotlib event called when a minimum is clicked on"""
artists = {self._boundary_points, self._minima_points}
if event.artist not in artists:
# print "you clicked on something other than a node"
return True
ind = event.ind[0]
if event.artist == self._minima_points:
min1 = self._mimima_layout_list[ind][0]
else:
min1 = self._boundary_layout_list[ind][0]
if event.mouseevent.button == 3:
self._on_right_click_minimum(min1)
else:
self._on_left_click_minimum(min1)
def show_graph(self, fixed=False, show_ids=True):
"""draw the graph"""
import pylab as pl
if not hasattr(self, "graph"):
self.make_graph()
print "showing graph"
# I need to clear the figure and make a new axes object because
# I can't find any other way to remove old colorbars
self.fig.clf()
self.axes = self.fig.add_subplot(111)
ax = self.axes
ax.clear()
graph = self.graph
# get the layout of the nodes from networkx
oldlayout = self.positions
layout = nx.spring_layout(graph, pos=oldlayout)
self.positions.update(layout)
layout = self.positions
# draw the edges as lines
from matplotlib.collections import LineCollection
linecollection = LineCollection([(layout[u], layout[v]) for u, v in graph.edges()
if u not in self.boundary_nodes and v not in self.boundary_nodes])
linecollection.set_color('k')
ax.add_collection(linecollection)
if self.boundary_nodes:
# draw the edges connecting the boundary nodes as thin lines
from matplotlib.collections import LineCollection
linecollection = LineCollection([(layout[u], layout[v]) for u, v in graph.edges()
if u in self.boundary_nodes or v in self.boundary_nodes])
linecollection.set_color('k')
linecollection.set_linewidth(0.2)
ax.add_collection(linecollection)
markersize = 8**2
# draw the interior nodes
interior_nodes = set(graph.nodes()) - self.boundary_nodes
layoutlist = filter(lambda nxy: nxy[0] in interior_nodes, layout.items())
xypos = np.array([xy for n, xy in layoutlist])
if self._minima_color_value is None:
#color the nodes by energy
color_values = [m.energy for m, xy in layoutlist]
else:
color_values = [self._minima_color_value(m) for m, xy in layoutlist]
#plot the nodes
self._minima_points = ax.scatter(xypos[:,0], xypos[:,1], picker=5,
s=markersize, c=color_values, cmap=pl.cm.autumn)
self._mimima_layout_list = layoutlist
# if not hasattr(self, "colorbar"):
self.colorbar = self.fig.colorbar(self._minima_points)
self._boundary_points = None
self._boundary_list = []
if self.boundary_nodes:
# draw the boundary nodes as empty circles with thin lines
boundary_layout_list = filter(lambda nxy: nxy[0] in self.boundary_nodes, layout.items())
xypos = np.array([xy for n, xy in boundary_layout_list])
#plot the nodes
# marker = mpl.markers.MarkerStyle("o", fillstyle="none")
# marker.set_fillstyle("none")
self._boundary_points = ax.scatter(xypos[:,0], xypos[:,1], picker=5,
s=markersize, marker="o", facecolors="none", linewidths=.5)
self._boundary_layout_list = boundary_layout_list
# scale the axes so the points are not cut off
xmax = max((x for x,y in layout.itervalues() ))
xmin = min((x for x,y in layout.itervalues() ))
ymax = max((y for x,y in layout.itervalues() ))
ymin = min((y for x,y in layout.itervalues() ))
dx = (xmax - xmin)*.1
dy = (ymax - ymin)*.1
ax.set_xlim([xmin-dx, xmax+dx])
ax.set_ylim([ymin-dy, ymax+dy])
if self._mpl_cid is not None:
self.canvas.mpl_disconnect(self._mpl_cid)
self._mpl_cid = None
self._mpl_cid = self.fig.canvas.mpl_connect('pick_event', self._on_mpl_pick_event)
self.canvas.draw()
self.app.processEvents()
class GraphViewDialog(QtGui.QMainWindow):
def __init__(self, database, parent=None, app=None):
QtGui.QMainWindow.__init__(self, parent=parent)
self.setWindowTitle("Connectivity graph")
self.widget = GraphViewWidget(database=database, parent=self, app=app)
self.setCentralWidget(self.widget)
self.app = app
def start(self):
self.widget.show_all()
def test():
from OpenGL.GLUT import glutInit
import sys
import pylab as pl
app = QtGui.QApplication(sys.argv)
from pele.systems import LJCluster
pl.ion()
natoms = 13
system = LJCluster(natoms)
system.params.double_ended_connect.local_connect_params.NEBparams.iter_density = 5.
dbname = "lj%dtest.db" % (natoms,)
db = system.create_database(dbname)
#get some minima
if False:
bh = system.get_basinhopping(database=db)
bh.run(10)
minima = db.minima()
else:
x1, e1 = system.get_random_minimized_configuration()[:2]
x2, e2 = system.get_random_minimized_configuration()[:2]
min1 = db.addMinimum(e1, x1)
min2 = db.addMinimum(e2, x2)
minima = [min1, min2]
# connect some of the minima
nmax = min(3, len(minima))
m1 = minima[0]
for m2 in minima[1:nmax]:
connect = system.get_double_ended_connect(m1, m2, db)
connect.connect()
if True:
from pele.thermodynamics import get_thermodynamic_information
get_thermodynamic_information(system, db, nproc=4)
wnd = GraphViewDialog(db, app=app)
# decrunner = DECRunner(system, db, min1, min2, outstream=wnd.textEdit_writer)
glutInit()
wnd.show()
from PyQt4.QtCore import QTimer
def start():
wnd.start()
QTimer.singleShot(10, start)
sys.exit(app.exec_())
if __name__ == "__main__":
test()
| gpl-3.0 |
iiSeymour/pandashells | pandashells/lib/plot_lib.py | 1 | 3883 | #! /usr/bin/env python
import sys
import re
from dateutil.parser import parse
import matplotlib as mpl
import pylab as pl
import seaborn as sns
import mpld3
def show(args):
# if figure saving requested
if hasattr(args, 'savefig') and args.savefig:
# save html if requested
rex_html = re.compile('.*?\.html$')
if rex_html.match(args.savefig[0]):
fig = pl.gcf()
html = mpld3.fig_to_html(fig)
with open(args.savefig[0], 'w') as outfile:
outfile.write(html)
return
# save image types
pl.savefig(args.savefig[0])
# otherwise show to screen
else:
pl.show()
def set_plot_styling(args):
# set up seaborn context
sns.set(context=args.plot_context[0],
style=args.plot_theme[0],
palette=args.plot_palette[0])
# modify seaborn slightly to look good in interactive backends
if 'white' not in args.plot_theme[0]:
mpl.rcParams['figure.facecolor'] = 'white'
mpl.rcParams['figure.edgecolor'] = 'white'
def set_limits(args):
if args.xlim:
pl.gca().set_xlim(args.xlim)
if args.ylim:
pl.gca().set_ylim(args.ylim)
def set_scale(args):
if args.xlog:
pl.gca().set_xscale('log')
if args.ylog:
pl.gca().set_yscale('log')
def set_labels_title(args):
if args.title:
pl.title(args.title[0])
if args.xlabel:
pl.xlabel(args.xlabel[0])
if args.ylabel:
pl.ylabel(args.ylabel[0])
def set_legend(args):
if args.legend:
loc = args.legend[0]
rex = re.compile(r'\d')
m = rex.match(loc)
if m:
loc = int(loc)
else:
loc = 'best'
pl.legend(loc=loc)
def set_grid(args):
if args.no_grid:
pl.grid(False)
else:
pl.grid(True)
def ensure_xy_args(args):
x_is_none = args.x is None
y_is_none = args.y is None
if (x_is_none ^ y_is_none):
msg = "\nIf either x or y is specified, both must be specified\n\n"
sys.stderr.write(msg)
sys.exit(1)
def ensure_xy_omission_state(args, df):
if (len(df.columns) != 2) and (args.x is None):
msg = "\n\n x and y can be ommited only "
msg += "for 2-column data-frames\n"
sys.stderr.write(msg)
sys.exit(1)
def autofill_plot_fields_and_labels(args, df):
# add labels for two column inputs
if (args.x is None) and (len(df.columns) == 2):
args.x = [df.columns[0]]
args.y = [df.columns[1]]
# if no xlabel, set it to the x field
if args.xlabel is None:
args.xlabel = args.x
# if no ylabel, and only 1 trace being plotted, set ylabel to that field
if (args.ylabel is None) and (len(args.y) == 1):
args.ylabel = [args.y[0]]
def str_to_date(x):
try:
basestring
except NameError:
basestring = str
if isinstance(x.iloc[0], basestring):
return [parse(e) for e in x]
else:
return x
def draw_traces(args, df):
y_field_list = args.y
x = str_to_date(df[args.x[0]])
style_list = args.style
alpha_list = args.alpha
if len(style_list) != len(y_field_list):
style_list = [style_list[0] for y_field in y_field_list]
if len(alpha_list) != len(y_field_list):
alpha_list = [alpha_list[0] for y_field in y_field_list]
for y_field, style, alpha in zip(y_field_list, style_list, alpha_list):
y = df[y_field]
pl.plot(x, y, style, label=y_field, alpha=alpha)
def refine_plot(args):
set_limits(args)
set_scale(args)
set_labels_title(args)
set_grid(args)
set_legend(args)
def draw_xy_plot(args, df):
ensure_xy_args(args)
ensure_xy_omission_state(args, df)
autofill_plot_fields_and_labels(args, df)
draw_traces(args, df)
refine_plot(args)
show(args)
| bsd-2-clause |
pypot/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
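# A minimal sketch of how the helper above is meant to be used (comments only,
# assuming missing_values=0 and strategy="mean"): for X = [[1, 0], [3, 4]] the
# imputed result should be [[1, 4], [3, 4]] and the fitted statistics [2, 4],
# since column 0 averages its observed values and the single missing entry in
# column 1 is replaced by the only observed value, 4.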
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test fails after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
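# For instance, the last column of X above ([5, 3, -1, 7] with -1 missing) is a
# three-way tie, so the imputed value is expected to be 3, the lowest of them.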
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
mraspaud/dask | dask/dataframe/tests/test_ufunc.py | 5 | 9888 | from __future__ import absolute_import, division, print_function
import pytest
pd = pytest.importorskip('pandas')
import pandas.util.testing as tm
import numpy as np
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq
_BASE_UFUNCS = ['conj', 'exp', 'log', 'log2', 'log10', 'log1p',
'expm1', 'sqrt', 'square', 'sin', 'cos', 'tan',
'arcsin','arccos', 'arctan', 'sinh', 'cosh', 'tanh',
'arcsinh', 'arccosh', 'arctanh', 'deg2rad', 'rad2deg',
'isfinite', 'isinf', 'isnan', 'signbit',
'degrees', 'radians', 'rint', 'fabs', 'sign', 'absolute',
'floor', 'ceil', 'trunc', 'logical_not']
@pytest.mark.parametrize('ufunc', _BASE_UFUNCS)
def test_ufunc(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
s = pd.Series(np.random.randint(1, 100, size=20))
ds = dd.from_pandas(s, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ds), dd.Series)
assert_eq(dafunc(ds), npfunc(s))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ds), pd.Series)
assert_eq(npfunc(ds), npfunc(s))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(s), pd.Series)
assert_eq(dafunc(s), npfunc(s))
s = pd.Series(np.abs(np.random.randn(100)))
ds = dd.from_pandas(s, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ds), dd.Series)
assert_eq(dafunc(ds), npfunc(s))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ds), pd.Series)
assert_eq(npfunc(ds), npfunc(s))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(s), pd.Series)
assert_eq(dafunc(s), npfunc(s))
# DataFrame
df = pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))})
ddf = dd.from_pandas(df, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ddf), dd.DataFrame)
assert_eq(dafunc(ddf), npfunc(df))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ddf), pd.DataFrame)
assert_eq(npfunc(ddf), npfunc(df))
# applying Dask ufunc to normal Dataframe triggers computation
assert isinstance(dafunc(df), pd.DataFrame)
assert_eq(dafunc(df), npfunc(df))
# Index
if ufunc in ('logical_not', 'signbit', 'isnan', 'isinf', 'isfinite'):
return
assert isinstance(dafunc(ddf.index), dd.Index)
assert_eq(dafunc(ddf.index), npfunc(df.index))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ddf.index), pd.Index)
assert_eq(npfunc(ddf.index), npfunc(df.index))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(df.index), pd.Index)
assert_eq(dafunc(df), npfunc(df))
@pytest.mark.parametrize('ufunc', _BASE_UFUNCS)
def test_ufunc_with_index(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
s = pd.Series(np.random.randint(1, 100, size=20),
index=list('abcdefghijklmnopqrst'))
ds = dd.from_pandas(s, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ds), dd.Series)
assert_eq(dafunc(ds), npfunc(s))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ds), pd.Series)
assert_eq(npfunc(ds), npfunc(s))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(s), pd.Series)
assert_eq(dafunc(s), npfunc(s))
s = pd.Series(np.abs(np.random.randn(20)),
index=list('abcdefghijklmnopqrst'))
ds = dd.from_pandas(s, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ds), dd.Series)
assert_eq(dafunc(ds), npfunc(s))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ds), pd.Series)
assert_eq(npfunc(ds), npfunc(s))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(s), pd.Series)
assert_eq(dafunc(s), npfunc(s))
df = pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))},
index=list('abcdefghijklmnopqrst'))
ddf = dd.from_pandas(df, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ddf), dd.DataFrame)
assert_eq(dafunc(ddf), npfunc(df))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ddf), pd.DataFrame)
assert_eq(npfunc(ddf), npfunc(df))
# applying Dask ufunc to normal DataFrame triggers computation
assert isinstance(dafunc(df), pd.DataFrame)
assert_eq(dafunc(df), npfunc(df))
@pytest.mark.parametrize('ufunc', ['isreal', 'iscomplex', 'real', 'imag',
'angle', 'fix'])
def test_ufunc_array_wrap(ufunc):
"""
some np.ufuncs don't call __array_wrap__; they should work as below
- da.ufunc(dd.Series) => dd.Series
- da.ufunc(pd.Series) => np.ndarray
- np.ufunc(dd.Series) => np.ndarray
- np.ufunc(pd.Series) => np.ndarray
"""
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
s = pd.Series(np.random.randint(1, 100, size=20),
index=list('abcdefghijklmnopqrst'))
ds = dd.from_pandas(s, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ds), dd.Series)
assert_eq(dafunc(ds), pd.Series(npfunc(s), index=s.index))
assert isinstance(npfunc(ds), np.ndarray)
tm.assert_numpy_array_equal(npfunc(ds), npfunc(s))
assert isinstance(dafunc(s), np.ndarray)
tm.assert_numpy_array_equal(dafunc(s), npfunc(s))
df = pd.DataFrame({'A': np.random.randint(1, 100, size=20),
'B': np.random.randint(1, 100, size=20),
'C': np.abs(np.random.randn(20))},
index=list('abcdefghijklmnopqrst'))
ddf = dd.from_pandas(df, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ddf), dd.DataFrame)
# result may be read-only ndarray
exp = pd.DataFrame(npfunc(df).copy(), columns=df.columns, index=df.index)
assert_eq(dafunc(ddf), exp)
assert isinstance(npfunc(ddf), np.ndarray)
tm.assert_numpy_array_equal(npfunc(ddf), npfunc(df))
assert isinstance(dafunc(df), np.ndarray)
tm.assert_numpy_array_equal(dafunc(df), npfunc(df))
@pytest.mark.parametrize('ufunc', ['logaddexp', 'logaddexp2', 'arctan2',
'hypot', 'copysign', 'nextafter', 'ldexp',
'fmod', 'logical_and', 'logical_or',
'logical_xor', 'maximum', 'minimum',
'fmax', 'fmin'])
def test_ufunc_with_2args(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
s1 = pd.Series(np.random.randint(1, 100, size=20))
ds1 = dd.from_pandas(s1, 3)
s2 = pd.Series(np.random.randint(1, 100, size=20))
ds2 = dd.from_pandas(s2, 4)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ds1, ds2), dd.Series)
assert_eq(dafunc(ds1, ds2), npfunc(s1, s2))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ds1, ds2), pd.Series)
assert_eq(npfunc(ds1, ds2), npfunc(s1, s2))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(dafunc(s1, s2), pd.Series)
assert_eq(dafunc(s1, s2), npfunc(s1, s2))
df1 = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
columns=['A', 'B'])
ddf1 = dd.from_pandas(df1, 3)
df2 = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
columns=['A', 'B'])
ddf2 = dd.from_pandas(df2, 4)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(ddf1, ddf2), dd.DataFrame)
assert_eq(dafunc(ddf1, ddf2), npfunc(df1, df2))
# applying NumPy ufunc triggers computation
assert isinstance(npfunc(ddf1, ddf2), pd.DataFrame)
assert_eq(npfunc(ddf1, ddf2), npfunc(df1, df2))
# applying Dask ufunc to normal DataFrame triggers computation
assert isinstance(dafunc(df1, df2), pd.DataFrame)
assert_eq(dafunc(df1, df2), npfunc(df1, df2))
def test_clip():
# clip internally calls dd.Series.clip
s = pd.Series(np.random.randint(1, 100, size=20))
ds = dd.from_pandas(s, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(da.clip(ds, 5, 50), dd.Series)
assert_eq(da.clip(ds, 5, 50), np.clip(s, 5, 50))
# applying Dask ufunc doesn't trigger computation
assert isinstance(np.clip(ds, 5, 50), dd.Series)
assert_eq(np.clip(ds, 5, 50), np.clip(s, 5, 50))
# applying Dask ufunc to normal Series triggers computation
assert isinstance(da.clip(s, 5, 50), pd.Series)
assert_eq(da.clip(s, 5, 50), np.clip(s, 5, 50))
df = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)),
columns=['A', 'B'])
ddf = dd.from_pandas(df, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(da.clip(ddf, 5.5, 40.5), dd.DataFrame)
assert_eq(da.clip(ddf, 5.5, 40.5), np.clip(df, 5.5, 40.5))
# applying Dask ufunc doesn't trigger computation
assert isinstance(np.clip(ddf, 5.5, 40.5), dd.DataFrame)
assert_eq(np.clip(ddf, 5.5, 40.5), np.clip(df, 5.5, 40.5))
# applying Dask ufunc to normal DataFrame triggers computation
assert isinstance(da.clip(df, 5.5, 40.5), pd.DataFrame)
assert_eq(da.clip(df, 5.5, 40.5), np.clip(df, 5.5, 40.5))
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
MSeifert04/numpy | numpy/lib/histograms.py | 4 | 39639 | """
Histogram-related functions
"""
from __future__ import division, absolute_import, print_function
import contextlib
import functools
import operator
import warnings
import numpy as np
from numpy.compat.py3k import basestring
from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _hist_bin_sqrt(x, range):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / np.sqrt(x.size)
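# Rough worked example of the rule above (illustrative numbers only): 10000
# samples spanning a range of 10.0 give a width of 10.0 / sqrt(10000) = 0.1,
# i.e. about 100 equal-width bins.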
def _hist_bin_sturges(x, range):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / (np.log2(x.size) + 1.0)
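# Rough worked example (illustrative numbers only): 1000 samples spanning a
# range of 10.0 give 10.0 / (log2(1000) + 1) ~= 10.0 / 10.97 ~= 0.91, i.e.
# roughly 11 bins.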
def _hist_bin_rice(x, range):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
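# Rough worked example (illustrative numbers only): 1000 samples spanning a
# range of 10.0 give 10.0 / (2 * 1000 ** (1/3)) = 10.0 / 20.0 = 0.5, i.e. 20 bins.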
def _hist_bin_scott(x, range):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
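# Note that (24.0 * sqrt(pi)) ** (1/3) ~= 3.49, so the expression above is the
# familiar Scott rule h ~= 3.49 * std(x) * n ** (-1/3); e.g. 1000 samples with
# unit standard deviation give a width of roughly 0.35.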
def _hist_bin_stone(x, range):
"""
Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
This paper by Stone appears to be the origination of this rule.
http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
range : (float, float)
The lower and upper range of the bins.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
n = x.size
ptp_x = np.ptp(x)
if n <= 1 or ptp_x == 0:
return 0
def jhat(nbins):
hh = ptp_x / nbins
p_k = np.histogram(x, bins=nbins, range=range)[0] / n
return (2 - (n + 1) * p_k.dot(p_k)) / hh
nbins_upper_bound = max(100, int(np.sqrt(n)))
nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
if nbins == nbins_upper_bound:
warnings.warn("The number of bins estimated may be suboptimal.",
RuntimeWarning, stacklevel=3)
return ptp_x / nbins
def _hist_bin_doane(x, range):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
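# Rough worked example (illustrative numbers only): for approximately
# standard-normal data the IQR is about 1.35, so 1000 samples give a width of
# roughly 2 * 1.35 / 1000 ** (1/3) ~= 0.27.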
def _hist_bin_auto(x, range):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
and the Sturges estimator if the FD bandwidth is 0.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
and is the default in the R language. This method gives good off the shelf
behaviour.
.. versionchanged:: 1.15.0
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
If the IQR is 0, it's unlikely any variance-based estimators will be of
use, so we revert to the Sturges estimator, which only uses the size of the
dataset in its calculation.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
fd_bw = _hist_bin_fd(x, range)
sturges_bw = _hist_bin_sturges(x, range)
del range # unused
if fd_bw:
return min(fd_bw, sturges_bw)
else:
# limited variance, so we return a len dependent bw estimator
return sturges_bw
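# Rough illustration of the choice above: for ~1000 roughly standard-normal
# samples the FD width (~0.27) is typically smaller than the Sturges width
# (~0.6 for a typical sample range of ~6.5), so the FD estimate is returned.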
# Private dict initialized at module load time
_hist_bin_selectors = {'stone': _hist_bin_stone,
'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
# Ensure that the array is a "subtractable" dtype
if a.dtype == np.bool_:
warnings.warn("Converting input from {} to {} for compatibility."
.format(a.dtype, np.uint8),
RuntimeWarning, stacklevel=3)
a = a.astype(np.uint8)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
def _unsigned_subtract(a, b):
"""
Subtract two values where a >= b, and produce an unsigned result
This is needed when finding the difference between the upper and lower
bound of an int16 histogram
"""
# coerce to a single type
signed_to_unsigned = {
np.byte: np.ubyte,
np.short: np.ushort,
np.intc: np.uintc,
np.int_: np.uint,
np.longlong: np.ulonglong
}
dt = np.result_type(a, b)
try:
dt = signed_to_unsigned[dt.type]
except KeyError:
return np.subtract(a, b, dtype=dt)
else:
# we know the inputs are integers, and we are deliberately casting
# signed to unsigned
return np.subtract(a, b, casting='unsafe', dtype=dt)
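# For example (illustrative): with int8 inputs the plain difference
# 127 - (-128) would overflow, whereas _unsigned_subtract casts to uint8
# and returns 255.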
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
Parameters
    ----------
a : ndarray
Ravelled data array
bins, range
Forwarded arguments from `histogram`.
weights : ndarray, optional
Ravelled weights array, or None
Returns
    -------
bin_edges : ndarray
Array of bin edges
    uniform_bins : (Number, Number, int)
        The lower bound, upper bound, and number of bins, used in the optimized
        implementation of `histogram` that works on uniform bins.
"""
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
if isinstance(bins, basestring):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
"{!r} is not a valid estimator for `bins`".format(bin_name))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
first_edge, last_edge = _get_outer_edges(a, range)
# truncate the range if needed
if range is not None:
keep = (a >= first_edge)
keep &= (a <= last_edge)
if not np.logical_and.reduce(keep):
a = a[keep]
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
if width:
n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif np.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError:
raise TypeError(
'`bins` must be an integer, a string, or an array')
if n_equal_bins < 1:
raise ValueError('`bins` must be positive, when an integer')
first_edge, last_edge = _get_outer_edges(a, range)
elif np.ndim(bins) == 1:
bin_edges = np.asarray(bins)
if np.any(bin_edges[:-1] > bin_edges[1:]):
raise ValueError(
'`bins` must increase monotonically, when an array')
else:
raise ValueError('`bins` must be 1d, when an array')
if n_equal_bins is not None:
# gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, a)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# bin edges must be computed
bin_edges = np.linspace(
first_edge, last_edge, n_equal_bins + 1,
endpoint=True, dtype=bin_type)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return bin_edges, None
def _search_sorted_inclusive(a, v):
"""
Like `searchsorted`, but where the last item in `v` is placed on the right.
In the context of a histogram, this makes the last bin edge inclusive
"""
return np.concatenate((
a.searchsorted(v[:-1], 'left'),
a.searchsorted(v[-1:], 'right')
))
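# For example (illustrative): with sorted data [1, 2, 2, 3, 4] and edges
# [1, 2, 3, 4] this returns [0, 1, 3, 5]; np.diff then gives the per-bin
# counts [1, 2, 2], the value 4 being counted in the last (closed) bin.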
def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
Function to calculate only the edges of the bins used by the `histogram`
function.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
            Less robust estimator that takes into account data
variability and data size.
'stone'
Estimator based on leave-one-out cross-validation estimate of
the integrated squared error. Can be regarded as a generalization
of Scott's rule.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). This is currently not used by any of the bin estimators,
but may be in the future.
Returns
-------
bin_edges : array of dtype float
The edges to pass into `histogram`
See Also
--------
histogram
Notes
-----
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'auto' (maximum of the 'sturges' and 'fd' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'fd' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
>>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> np.histogram_bin_edges(arr, bins=2)
array([0. , 2.5, 5. ])
For consistency with histogram, an array of pre-computed bins is
passed through unmodified:
>>> np.histogram_bin_edges(arr, [1, 2])
array([1, 2])
This function allows one set of bins to be computed, and reused across
multiple histograms:
>>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
>>> shared_bins
array([0., 1., 2., 3., 4., 5.])
>>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
>>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
>>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
>>> hist_0; hist_1
array([1, 1, 0, 1, 0])
array([2, 0, 1, 1, 2])
Which gives more easily comparable results than using separate bins for
each histogram:
>>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
>>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
>>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
array([0., 1., 2., 3.])
array([0. , 1.25, 2.5 , 3.75, 5. ])
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, _ = _get_bin_edges(a, bins, range, weights)
return bin_edges
def _histogram_dispatcher(
a, bins=None, range=None, normed=None, weights=None, density=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, normed=None, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
.. deprecated:: 1.6.0
This is equivalent to the `density` argument, but produces incorrect
results for unequal bin widths. It should not be used.
.. versionchanged:: 1.15.0
DeprecationWarnings are actually emitted.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
Text(0.5, 1.0, "Histogram with 'auto' bins")
>>> plt.show()
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
# The fast path uses bincount, but that only works for certain types
# of weight
simple_weights = (
weights is None or
np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, complex)
)
if uniform_bins is not None and simple_weights:
# Fast algorithm for equal bins
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
first_edge, last_edge, n_equal_bins = uniform_bins
# Initialize empty histogram
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
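        # For example, with first_edge=0, last_edge=10 and 5 equal bins,
        # norm is 0.5 and a value of 7.0 maps to index int(7.0 * 0.5) == 3,
        # i.e. the bin [6, 8).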
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= first_edge)
keep &= (tmp_a <= last_edge)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
# This cast ensures no type promotions occur below, which gh-10322
# make unpredictable. Getting it wrong leads to precision errors
# like gh-8123.
tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = ((tmp_a >= bin_edges[indices + 1])
& (indices != n_equal_bins - 1))
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real,
minlength=n_equal_bins)
n.imag += np.bincount(indices, weights=tmp_w.imag,
minlength=n_equal_bins)
else:
n += np.bincount(indices, weights=tmp_w,
minlength=n_equal_bins).astype(ntype)
else:
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
for i in _range(0, len(a), BLOCK):
sa = np.sort(a[i:i+BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate((zero, sw.cumsum()))
bin_index = _search_sorted_inclusive(sa, bin_edges)
cum_n += cw[bin_index]
n = np.diff(cum_n)
# density overrides the normed keyword
if density is not None:
if normed is not None:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"The normed argument is ignored when density is provided. "
"In future passing both will result in an error.",
DeprecationWarning, stacklevel=3)
normed = None
if density:
db = np.array(np.diff(bin_edges), float)
return n/db/n.sum(), bin_edges
elif normed:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"Passing `normed=True` on non-uniform bins has always been "
"broken, and computes neither the probability density "
"function nor the probability mass function. "
"The result is only correct if the bins are uniform, when "
"density=True will produce the same result anyway. "
"The argument will be removed in a future version of "
"numpy.",
np.VisibleDeprecationWarning, stacklevel=3)
# this normalization is incorrect, but
db = np.array(np.diff(bin_edges), float)
return n/(n*db).sum(), bin_edges
else:
if normed is not None:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"Passing normed=False is deprecated, and has no effect. "
"Consider passing the density argument instead.",
DeprecationWarning, stacklevel=3)
return n, bin_edges
def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
weights=None, density=None):
if hasattr(sample, 'shape'): # same condition as used in histogramdd
yield sample
else:
yield from sample
with contextlib.suppress(TypeError):
yield from bins
yield weights
@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : (N, D) array, or (D, N) array_like
The data to be histogrammed.
Note the unusual interpretation of sample when an array_like:
* When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
* When an array_like, each element is the list of values for single
          coordinate - such as ``histogramdd((X, Y, Z))``.
The first form should be preferred.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the monotonically increasing bin
edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... = bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of length D, each an optional (lower, upper) tuple giving
the outer bin edges to be used if the edges are not given explicitly in
`bins`.
An entry of None in the sequence results in the minimum and maximum
values being used for the corresponding dimension.
The default, None, is equivalent to passing a tuple of D None values.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_volume``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = np.asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# normalize the range argument
if range is None:
range = (None,) * D
elif len(range) != D:
raise ValueError('range argument must have one entry per dimension')
# Create edge arrays
for i in _range(D):
if np.ndim(bins[i]) == 0:
if bins[i] < 1:
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
edges[i] = np.linspace(smin, smax, bins[i] + 1)
elif np.ndim(bins[i]) == 1:
edges[i] = np.asarray(bins[i])
if np.any(edges[i][:-1] > edges[i][1:]):
raise ValueError(
'`bins[{}]` must be monotonically increasing, when an array'
.format(i))
else:
raise ValueError(
'`bins[{}]` must be a scalar or 1d array'.format(i))
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
dedges[i] = np.diff(edges[i])
# Compute the bin number each sample falls into.
Ncount = tuple(
# avoid np.digitize to work around gh-11022
np.searchsorted(edges[i], sample[:, i], side='right')
for i in _range(D)
)
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
# Find which points are on the rightmost edge.
on_edge = (sample[:, i] == edges[i][-1])
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
xy = np.ravel_multi_index(Ncount, nbin)
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
hist = np.bincount(xy, weights, minlength=nbin.prod())
# Shape into a proper matrix
hist = hist.reshape(nbin)
# This preserves the (bad) behavior observed in gh-7845, for now.
hist = hist.astype(float, casting='safe')
# Remove outliers (indices 0 and -1 for each dimension).
core = D*(slice(1, -1),)
hist = hist[core]
# handle the aliasing normed argument
if normed is None:
if density is None:
density = False
elif density is None:
# an explicit normed argument was passed, alias it to the new name
density = normed
else:
raise TypeError("Cannot specify both 'normed' and 'density'")
if density:
# calculate the probability density function
s = hist.sum()
for i in _range(D):
shape = np.ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | 4 | 7031 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
import sys
from .mpl_axes import Axes
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
"""
pad : fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = locatable_axes_factory(ax._axes_class)
except AttributeError:
axes_class = locatable_axes_factory(type(ax))
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
def imshow_rgb(ax, r, g, b, **kwargs):
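    # Stack the three channels into an (ny, nx, 3) float composite, keeping
    # each channel in its own colour plane, and draw it with imshow.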
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = ax.imshow(RGB, **kwargs)
return im_rgb
class RGBAxesBase(object):
"""base class for a 4-panel imshow (RGB, R, G, B)
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Attributes
----------
_defaultAxesClass : matplotlib.axes.Axes
defaults to 'Axes' in RGBAxes child class.
No default in abstract base class
RGB : _defaultAxesClass
The axes object for the three-channel imshow
R : _defaultAxesClass
The axes object for the red channel imshow
G : _defaultAxesClass
The axes object for the green channel imshow
B : _defaultAxesClass
The axes object for the blue channel imshow
"""
def __init__(self, *kl, **kwargs):
"""
Parameters
----------
pad : float
fraction of the axes height to put as padding.
defaults to 0.0
add_all : bool
True: Add the {rgb, r, g, b} axes to the figure
defaults to True.
axes_class : matplotlib.axes.Axes
kl :
Unpacked into axes_class() init for RGB
kwargs :
Unpacked into axes_class() init for RGB, R, G, B axes
"""
pad = kwargs.pop("pad", 0.0)
add_all = kwargs.pop("add_all", True)
try:
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
except AttributeError:
new_msg = ("A subclass of RGBAxesBase must have a "
"_defaultAxesClass attribute. If you are not sure which "
"axes class to use, consider using "
"mpl_toolkits.axes_grid1.mpl_axes.Axes.")
six.reraise(AttributeError, AttributeError(new_msg),
sys.exc_info()[2])
ax = axes_class(*kl, **kwargs)
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
ax1.axis[:].toggle(ticklabels=False)
ax_rgb.append(ax1)
self.RGB = ax
self.R, self.G, self.B = ax_rgb
if add_all:
fig = ax.get_figure()
fig.add_axes(ax)
self.add_RGB_to_figure()
self._config_axes()
def _config_axes(self, line_color='w', marker_edge_color='w'):
"""Set the line color and ticks for the axes
Parameters
----------
line_color : any matplotlib color
marker_edge_color : any matplotlib color
"""
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color(line_color)
ax1.axis[:].major_ticks.set_markeredgecolor(marker_edge_color)
def add_RGB_to_figure(self):
"""Add the red, green and blue axes to the RGB composite's axes figure
"""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""Create the four images {rgb, r, g, b}
Parameters
----------
r : array-like
The red array
g : array-like
The green array
b : array-like
The blue array
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
ny, nx = r.shape
        if not ((ny, nx) == g.shape == b.shape):
raise ValueError('Input shapes do not match.'
'\nr.shape = {0}'
'\ng.shape = {1}'
'\nb.shape = {2}'
''.format(r.shape, g.shape, b.shape))
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
class RGBAxes(RGBAxesBase):
_defaultAxesClass = Axes
| mit |
hbp-unibi/cypress | scripts/decode_function.py | 2 | 2112 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cypress -- C++ Spiking Neural Network Simulation Framework
# Copyright (C) 2016 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from numpy.linalg import inv
import math
import matplotlib
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
print "Usage: " + sys.argv[0] + " <TUNING CURVE INPUT>"
sys.exit(1)
def cm2inch(value):
return value / 2.54
input_file = sys.argv[1]
data = np.mat(np.loadtxt(input_file, delimiter=","))
# Fetch the design matrix Phi and the number of samples/functions
Phi = data[:, 1:]
n_samples = Phi.shape[0]
n_func = Phi.shape[1]
# Construct input and desired output vectors
X = data[:, 0]
Y = (np.sin(X * 2.0 * math.pi) + 1.0) * 0.5 # Target function
# Calculate a ridge-regularised (Tikhonov) pseudoinverse of Phi
lambda_ = 0.2
PhiI = inv(Phi.T * Phi + lambda_ * np.eye(n_func)) * Phi.T
# Calculate the weights
w = PhiI * Y
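# The weights above are the regularised least-squares solution, i.e. they
# minimise ||Phi*w - Y||^2 + lambda_*||w||^2 for the lambda_ chosen above.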
print("Maximum w: " + str(np.max(w)))
print("Minimum w: " + str(np.min(w)))
# Reconstruct the function
YRec = Phi * w
print(w)
fig = plt.figure(figsize=(cm2inch(7), cm2inch(7)))
ax = fig.gca()
n_pop = data.shape[1] - 1
ax.plot(X, Y, ':', color=[0.25, 0.25, 0.25])
ax.plot(X, YRec, '-', color=[0.0, 0.0, 0.0])
ax.set_xlabel("Input value")
ax.set_ylabel("Function value")
ax.set_title("Decoding of $\\frac{1}2 \\cdot (\\sin(x * 2\\pi) + 1)$")
ax.set_xlim(0.0, 1.0)
fig.savefig("reconstructed_function.pdf", format='pdf',
bbox_inches='tight')
| gpl-3.0 |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/GeneralisedEigen/NoBC/MHDallatonce.py | 4 | 9242 | import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
from dolfin import tic, toc
import HiptmairSetup
import PETScIO as IO
import scipy.sparse as sp
import matplotlib.pylab as plt
import MatrixOperations as MO
class BaseMyPC(object):
def setup(self, pc):
pass
def reset(self, pc):
pass
def apply(self, pc, x, y):
raise NotImplementedError
def applyT(self, pc, x, y):
self.apply(pc, x, y)
def applyS(self, pc, x, y):
self.apply(pc, x, y)
def applySL(self, pc, x, y):
self.applyS(pc, x, y)
def applySR(self, pc, x, y):
self.applyS(pc, x, y)
def applyRich(self, pc, x, y, w, tols):
self.apply(pc, x, y)
class InnerOuterWITHOUT2inverse(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
self.F = F
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
self.Ct = A.getSubMatrix(self.u_is,self.b_is)
self.C = A.getSubMatrix(self.b_is,self.u_is)
self.D = A.getSubMatrix(self.r_is,self.b_is)
self.Bt = A.getSubMatrix(self.u_is,self.p_is)
self.B = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.b_is,self.r_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
#MX = self.AA+self.F
MX = self.F # MO.StoreMatrix(B,"A")
# print FC.todense()
self.kspF.setType('preonly')
self.kspF.getPC().setType('lu')
self.kspF.setFromOptions()
self.kspF.setPCSide(0)
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
kspMX.setOperators(MX,MX)
OptDB = PETSc.Options()
#OptDB["pc_factor_mat_ordering_type"] = "rcm"
#OptDB["pc_factor_mat_solver_package"] = "mumps"
kspMX.setFromOptions()
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
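        # Sketch of the block solve order (inferred from the calls below):
        # first the Lagrange-multiplier block r via kspScalar, then the
        # pressure block via a pressure-convection-diffusion style sweep
        # (Ap^-1, then Fp, then Qp^-1), then the magnetic block with the
        # Dt*xr coupling moved to the right-hand side, and finally the
        # velocity block with the Bt*xp and Ct*xb couplings subtracted
        # from its right-hand side.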
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
bb = bb - self.Dt*xr
xb = bb.duplicate()
self.kspMX.solve(bb,xb)
bu1 = x.getSubVector(self.u_is)
bu2 = self.Bt*xp
bu4 = self.Ct*xb
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4-bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
class InnerOuterMAGNETICinverse(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
self.F = F
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
self.Ct = A.getSubMatrix(self.u_is,self.b_is)
self.C = A.getSubMatrix(self.b_is,self.u_is)
self.D = A.getSubMatrix(self.r_is,self.b_is)
self.Bt = A.getSubMatrix(self.u_is,self.p_is)
self.B = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.b_is,self.r_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
FF = self.F
# MO.StoreMatrix(B,"A")
# print FC.todense()
self.kspF.setOperators(FF,FF)
self.kspF.setType('preonly')
self.kspF.getPC().setType('lu')
self.kspF.setFromOptions()
self.kspF.setPCSide(0)
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
OptDB = PETSc.Options()
kspMX.setOperators(self.AA,self.AA)
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
bb = bb - self.Dt*xr
xb = bb.duplicate()
self.kspMX.solve(bb,xb)
bu1 = x.getSubVector(self.u_is)
bu2 = self.Bt*xp
bu4 = self.Ct*xb
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4-bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
| mit |
pschella/scipy | doc/source/tutorial/examples/normdiscr_plot1.py | 84 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1) #integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
weissercn/MLTools | Dalitz_simplified/classifier_eval_simplified_example.py | 1 | 4731 | print(__doc__)
import p_value_scoring_object
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
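        # Piecewise-linear map sending vmin -> 0, midpoint -> 0.5 and
        # vmax -> 1, so the colormap is centred on the chosen midpoint.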
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(probability=True), scoring=p_value_scoring_object.p_value_scoring_object ,param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
plt.savefig('prediction_comparison.png')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=-1.0, midpoint=-0.0001))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.savefig('Heat_map.png')
| mit |
s3h10r/avaon | django-avaon/acore/views/visualize.py | 1 | 6406 | """
statistics - heatmaps of impacts
"""
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import loader,Context,RequestContext
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
# --- if no x-server is available, comment this out
#import matplotlib
#matplotlib.use("Cairo") # cairo-backend, so we can create figures without x-server (PDF & PNG ...)
# Cairo backend requires that pycairo is installed.
# --- end no x-server
def _calc_stats_heatmap(id=None):
"""
    create a data structure which gives us info about the density of impacts
per weekday & hour
returns
data[weekday][hour] = nr. of impacts
"""
"""
TODO: write a test for me
"""
import datetime as dt
from acore.models import Impact, Thing
if id:
imps = Impact.objects.filter(thing = id)
else:
imps = Impact.objects.all()
# --- init empty data[weekday][hour]
# data = [[0] *24]*7 # data[weekday][hour] # BUG! try: data[0][1] && print data (reference?)
data = []
for i in range(7):
data.append([0]*24)
# ---
delta = dt.timedelta(seconds=60*60)
for im in imps:
#print im.t_from, "+", im.duration, "==", im.duration.seconds, 's'
#print " "*4, im.t_from.weekday()
travel_t = im.t_from
dur = 0
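        # Walk from t_from towards t_to in one-hour steps, crediting every
        # (weekday, hour) bucket that this impact overlaps.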
while True:
wd = travel_t.weekday()
h = travel_t.hour
data[wd][h] += 1
dur += delta.seconds
travel_t += delta
if ((im.t_to - travel_t) < delta) and (im.t_to.hour != travel_t.hour):
break
return data
def _plot_heatmap(fig,ax,data,y_label="y_label",x_label="x_label",title="title"):
"""
"""
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
ax.set_title(title,fontstyle='italic')
ax.set_ylabel(y_label)
ax.set_xlabel(x_label)
ticks_x=24
ticks_y=7
# we must turn our data into a numpy-array for using pcolor
data = np.array(data)
ax.pcolor(data, cmap=plt.cm.Blues)
pc = ax.pcolor(data, cmap=plt.cm.Blues)
fig.colorbar(pc, shrink=0.5)
# Shift ticks to be at 0.5, 1.5, etc
# http://stackoverflow.com/questions/24190858/matplotlib-move-ticklabels-between-ticks
ax.yaxis.set(ticks=np.arange(0.5,ticks_y + 0.5),ticklabels=range(0,ticks_y))
ax.xaxis.set(ticks=np.arange(0.5,ticks_x + 0.5),ticklabels=range(0,ticks_x))
ax.set_xlim(0.0,ticks_x)
ax.set_ylim(0.0,ticks_y)
ax.xaxis.labelpad = 10
fig.gca().set_aspect('equal')
fig.tight_layout()# not as good as savefig(bbox_inches="tight")?
def heatmap(request,id=None):
"""
plot a "impact density heatmap" in format data[weekday][hour]
for all available impacts or for Thing.id = id
"""
"""
TODO: time-range param
"""
import numpy as np
import random
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
data = _calc_stats_heatmap(id)
fig=Figure(figsize=(10,8), dpi=100)
ax=fig.add_subplot(111)
if not id:
_plot_heatmap(fig,ax,data,y_label="weekday", x_label="time", title="impact density #heatmap\n")
else:
from acore.models import Impact, Thing
thing = Thing.objects.get(id=id)
_plot_heatmap(fig,ax,data,y_label="weekday", x_label="time", title="'%s' - impact density #heatmap\n" % thing.name)
# cool, Django's HttpResponse object supports file-like API :)
# so we dunnot need StringIO
response = HttpResponse(content_type="image/png")
# bbox_inches 'tight' removes lot of unwanted whitespace around our plot
fig.savefig(response, format="png",bbox_inches='tight')
return response
def demo_heatmap(request):
"""
returns matplotlib plot as selftest
"""
"""
http://stackoverflow.com/questions/14391959/heatmap-in-matplotlib-with-pcolor
http://stackoverflow.com/questions/15988413/python-pylab-pcolor-options-for-publication-quality-plots
http://stackoverflow.com/questions/24190858/matplotlib-move-ticklabels-between-ticks
http://matplotlib.org/faq/usage_faq.html#matplotlib-pylab-and-pyplot-how-are-they-related
"""
import django
import numpy as np
import random
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
fig=Figure(figsize=(10,8), dpi=100)
ax=fig.add_subplot(111)
ax.set_title("demo heatmap e.g. 'impact density'",fontstyle='italic')
ax.set_ylabel("weekday")
ax.set_xlabel("time in h")
ticks_x=24
ticks_y=7
data = []
for i in range(ticks_y):
y = [ random.randint(0,10) for i in range(ticks_x) ]
data.append(y)
data = np.array(data)
pc = ax.pcolor(data, cmap=plt.cm.Blues) # http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
ax.yaxis.set_ticks(np.arange(0.5,ticks_y + 0.5),range(0,ticks_y))
ax.xaxis.set_ticks(np.arange(0.5,ticks_x + 0.5),range(0,ticks_x))
# http://stackoverflow.com/questions/12608788/changing-the-tick-frequency-on-x-or-y-axis-in-matplotlib
import matplotlib.ticker as ticker
#ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
loc = ticker.MultipleLocator(base=1) # this locator puts ticks at regular intervals
ax.xaxis.set_major_locator(loc)
ax.xaxis.set_minor_locator(ticker.NullLocator())
ax.set_xlim(0.0,ticks_x)
ax.set_ylim(0.0,ticks_y)
#ax.xaxis.labelpad = 20
fig.colorbar(pc, shrink=0.5)
#plt.gca().invert_yaxis()
fig.gca().set_aspect('equal')
# tight_layout() & bbox_inches 'tight' removes lot of unwanted
# whitespace around our plot
fig.tight_layout()# not as good as savefig(bbox_inches="tight")?
# cool, Django's HttpResponse object supports file-like API :)
# so we dunnot need StringIO
response=django.http.HttpResponse(content_type='image/png')
fig.savefig(response, format="png",bbox_inches='tight')
return response
if __name__ == '__main__':
print _calc_stats_heatmap()
| gpl-2.0 |
ZenDevelopmentSystems/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic (logit) curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
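# Logistic sigmoid used to map the fitted linear score onto (0, 1)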
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 21 | 26665 | """
TODO: cross-check the F-values with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
    # with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# with centering, compare with sparse
F, pv = f_regression(X, y, center=True)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_almost_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_boundary_case_ch2():
# Test boundary case, and always aim to select 1 feature.
X = np.array([[10, 20], [20, 20], [20, 30]])
y = np.array([[1], [0], [0]])
scores, pvalues = chi2(X, y)
assert_array_almost_equal(scores, np.array([4., 0.71428571]))
assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
filter_fdr = SelectFdr(chi2, alpha=0.1)
filter_fdr.fit(X, y)
support_fdr = filter_fdr.get_support()
assert_array_equal(support_fdr, np.array([True, False]))
filter_kbest = SelectKBest(chi2, k=1)
filter_kbest.fit(X, y)
support_kbest = filter_kbest.get_support()
assert_array_equal(support_kbest, np.array([True, False]))
filter_percentile = SelectPercentile(chi2, percentile=50)
filter_percentile.fit(X, y)
support_percentile = filter_percentile.get_support()
assert_array_equal(support_percentile, np.array([True, False]))
filter_fpr = SelectFpr(chi2, alpha=0.1)
filter_fpr.fit(X, y)
support_fpr = filter_fpr.get_support()
assert_array_equal(support_fpr, np.array([True, False]))
filter_fwe = SelectFwe(chi2, alpha=0.1)
filter_fwe.fit(X, y)
support_fwe = filter_fwe.get_support()
assert_array_equal(support_fwe, np.array([True, False]))
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(100)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_scorefunc_multilabel():
# Test whether k-best and percentiles works with multilabels with chi2.
X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
y = [[1, 1], [0, 1], [1, 0]]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
LukeWoodSMU/Mushroom-Classification | visualizations/comparative_plots.py | 1 | 1980 | import sys
import os
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.abspath('..'))
from preprocessing import shroom_dealer
def data(attribute):
df = shroom_dealer.get_data_frame()
attribute_values = shroom_dealer.get_attribute_dictionary()[attribute]
poisonous_data = {}
edible_data = {}
for a in attribute_values.keys():
poisonous_data[a] = \
df[attribute][df['poisonous'] == 'p'][df[attribute] == a].count()
edible_data[a] = \
df[attribute][df['poisonous'] == 'e'][df[attribute] == a].count()
return {"poisonous": poisonous_data, "edible": edible_data}
def plot_comparative_data(attribute, plot=True, save=False):
edible_data = data(attribute)["edible"]
poisonous_data = data(attribute)["poisonous"]
labels = shroom_dealer.get_attribute_dictionary()[attribute]
index = np.arange(len(edible_data))
bar_width = 0.35
opacity=0.4
fig, ax = plt.subplots()
plt.bar(index, edible_data.values(), bar_width, align='center',
color='b', label='edible', alpha=opacity)
plt.bar(index + bar_width, poisonous_data.values(), bar_width,
align='center', color='r', label='poisonous', alpha=opacity)
plt.xlabel('Attributes')
plt.ylabel('Frequency')
plt.title('Frequency by attribute and edibility ({})'.format(attribute))
plt.xticks(index + bar_width / 2,
[labels[key] for key in edible_data.keys()])
plt.legend()
plt.tight_layout()
if plot:
plt.show()
if save:
plt.savefig('comparative_barcharts/{}.png'.format(attribute))
plt.close()
def plot_all():
attributes = shroom_dealer.get_attribute_dictionary()
for a in attributes.keys():
plot_comparative_data(a, plot=True)
def save_all():
attributes = shroom_dealer.get_attribute_dictionary()
for a in attributes.keys():
plot_comparative_data(a, plot=False, save=True)
| gpl-3.0 |
erramuzpe/NeuroVault | neurovault/api/serializers.py | 3 | 11559 | import os
import json
import pandas as pd
from django.contrib.auth.models import User
from django.forms.utils import ErrorDict, ErrorList
from django.utils.http import urlquote
from rest_framework import serializers
from rest_framework.relations import PrimaryKeyRelatedField, StringRelatedField
from neurovault.apps.statmaps.forms import (
handle_update_ttl_urls,
ImageValidationMixin,
NIDMResultsValidationMixin,
save_nidm_statmaps
)
from neurovault.apps.statmaps.models import (
Atlas,
BaseCollectionItem,
CognitiveAtlasTask,
CognitiveAtlasContrast,
Collection,
NIDMResults,
NIDMResultStatisticMap,
StatisticMap
)
from neurovault.utils import strip, logical_xor
from neurovault.apps.statmaps.utils import get_paper_properties
class HyperlinkedFileField(serializers.FileField):
def to_representation(self, value):
if value:
request = self.context.get('request', None)
return request.build_absolute_uri(urlquote(value.url))
class HyperlinkedDownloadURL(serializers.RelatedField):
def to_representation(self, value):
if value:
request = self.context.get('request', None)
return request.build_absolute_uri(value + "download")
class HyperlinkedRelatedURL(serializers.RelatedField):
def to_representation(self, value):
if value:
request = self.context.get('request', None)
return request.build_absolute_uri(value.get_absolute_url())
class HyperlinkedImageURL(serializers.CharField):
def to_representation(self, value):
if value:
request = self.context.get('request', None)
return request.build_absolute_uri(value)
class SerializedContributors(serializers.CharField):
def to_representation(self, value):
if value:
return ', '.join([v.username for v in value.all()])
class NIDMDescriptionSerializedField(serializers.CharField):
def to_representation(self, value):
if value and self.parent.instance is not None:
parent = self.parent.instance.nidm_results.name
fname = os.path.split(self.parent.instance.file.name)[-1]
return 'NIDM Results: {0}.zip > {1}'.format(parent, fname)
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'first_name', 'last_name')
class ImageSerializer(serializers.HyperlinkedModelSerializer,
ImageValidationMixin):
id = serializers.ReadOnlyField()
file = HyperlinkedFileField()
collection = HyperlinkedRelatedURL(read_only=True)
collection_id = serializers.ReadOnlyField()
url = HyperlinkedImageURL(source='get_absolute_url',
read_only=True)
file_size = serializers.SerializerMethodField()
class Meta:
model = BaseCollectionItem
exclude = ['polymorphic_ctype']
def __init__(self, *args, **kwargs):
super(ImageSerializer, self).__init__(*args, **kwargs)
initial_data = getattr(self, 'initial_data', None)
if initial_data:
self._metadata_dict = self.extract_metadata_fields(
self.initial_data, self._writable_fields
)
def get_file_size(self, obj):
return obj.file.size
def to_representation(self, obj):
"""
Because Image is Polymorphic
"""
if isinstance(obj, StatisticMap):
serializer = StatisticMapSerializer
image_type = 'statistic_map'
elif isinstance(obj, Atlas):
serializer = AtlasSerializer
image_type = 'atlas'
elif isinstance(obj, NIDMResultStatisticMap):
serializer = NIDMResultStatisticMapSerializer
image_type = 'NIDM results statistic map'
elif isinstance(obj, NIDMResults):
serializer = NIDMResultsSerializer
image_type = 'NIDM Results'
orderedDict = serializer(obj, context={
'request': self.context['request']}).to_representation(obj)
orderedDict['image_type'] = image_type
for key, val in orderedDict.iteritems():
if pd.isnull(val):
orderedDict[key] = None
return orderedDict
def extract_metadata_fields(self, initial_data, writable_fields):
field_name_set = set(f.field_name for f in writable_fields)
metadata_field_set = initial_data.viewkeys() - field_name_set
return {key: initial_data[key] for key in metadata_field_set}
def validate(self, data):
self.afni_subbricks = []
self.afni_tmp = None
self._errors = ErrorDict()
self.error_class = ErrorList
cleaned_data = self.clean_and_validate(data)
if self.errors:
raise serializers.ValidationError(self.errors)
return cleaned_data
def save(self, *args, **kwargs):
metadata_dict = getattr(self, '_metadata_dict', None)
if metadata_dict:
data = self.instance.data.copy()
data.update(self._metadata_dict)
kwargs['data'] = data
self.is_valid = True
super(ImageSerializer, self).save(*args, **kwargs)
class EditableStatisticMapSerializer(ImageSerializer):
cognitive_paradigm_cogatlas = PrimaryKeyRelatedField(
queryset=CognitiveAtlasTask.objects.all(),
allow_null=True,
required=False
)
cognitive_contrast_cogatlas = PrimaryKeyRelatedField(
queryset=CognitiveAtlasContrast.objects.all(),
allow_null=True,
required=False
)
class Meta:
model = StatisticMap
read_only_fields = ('collection',)
exclude = ['polymorphic_ctype', 'ignore_file_warning', 'data']
class StatisticMapSerializer(ImageSerializer):
cognitive_paradigm_cogatlas = StringRelatedField(read_only=True)
cognitive_paradigm_cogatlas_id = PrimaryKeyRelatedField(
read_only=True, source="cognitive_paradigm_cogatlas")
cognitive_contrast_cogatlas = StringRelatedField(read_only=True)
cognitive_contrast_cogatlas_id = PrimaryKeyRelatedField(
read_only=True, source="cognitive_contrast_cogatlas")
map_type = serializers.SerializerMethodField()
analysis_level = serializers.SerializerMethodField()
def get_map_type(self, obj):
return obj.get_map_type_display()
def get_analysis_level(self, obj):
return obj.get_analysis_level_display()
class Meta:
model = StatisticMap
exclude = ['polymorphic_ctype', 'ignore_file_warning', 'data']
def value_to_python(self, value):
if not value:
return value
try:
return json.loads(value)
except (TypeError, ValueError):
return value
def to_representation(self, obj):
ret = super(ImageSerializer, self).to_representation(obj)
for field_name, value in obj.data.items():
if field_name not in ret:
ret[field_name] = self.value_to_python(value)
return ret
class NIDMResultStatisticMapSerializer(ImageSerializer):
nidm_results = HyperlinkedRelatedURL(read_only=True)
nidm_results_ttl = serializers.SerializerMethodField()
description = NIDMDescriptionSerializedField(source='get_absolute_url')
map_type = serializers.SerializerMethodField()
analysis_level = serializers.SerializerMethodField()
def get_map_type(self, obj):
return obj.get_map_type_display()
def get_analysis_level(self, obj):
return obj.get_analysis_level_display()
def get_nidm_results_ttl(self, obj):
return self.context['request'].build_absolute_uri(
obj.nidm_results.ttl_file.url
)
class Meta:
model = NIDMResultStatisticMap
exclude = ['polymorphic_ctype']
def to_representation(self, obj):
return super(ImageSerializer, self).to_representation(obj)
class AtlasSerializer(ImageSerializer):
label_description_file = HyperlinkedFileField()
class Meta:
model = Atlas
exclude = ['polymorphic_ctype']
def to_representation(self, obj):
return super(ImageSerializer, self).to_representation(obj)
class EditableAtlasSerializer(ImageSerializer):
class Meta:
model = Atlas
read_only_fields = ('collection',)
class NIDMResultsSerializer(serializers.ModelSerializer,
NIDMResultsValidationMixin):
zip_file = HyperlinkedFileField()
ttl_file = HyperlinkedFileField(required=False)
statmaps = ImageSerializer(many=True, source='nidmresultstatisticmap_set')
url = HyperlinkedImageURL(source='get_absolute_url', read_only=True)
def validate(self, data):
data['collection'] = self.instance.collection
return self.clean_and_validate(data)
def save(self):
instance = super(NIDMResultsSerializer, self).save()
nidm = getattr(self, 'nidm', False)
if nidm:
# Handle file upload
save_nidm_statmaps(nidm, instance)
handle_update_ttl_urls(instance)
return instance
class Meta:
model = NIDMResults
exclude = ['is_valid']
read_only_fields = ('collection',)
class EditableNIDMResultsSerializer(serializers.ModelSerializer,
NIDMResultsValidationMixin):
url = HyperlinkedImageURL(source='get_absolute_url', read_only=True)
def validate(self, data):
data['collection'] = self.instance.collection
return self.clean_and_validate(data)
def save(self):
instance = super(EditableNIDMResultsSerializer, self).save()
save_nidm_statmaps(self.nidm, instance)
handle_update_ttl_urls(instance)
return instance
class Meta:
model = NIDMResults
read_only_fields = ('collection',)
exclude = ['is_valid']
class CollectionSerializer(serializers.ModelSerializer):
url = HyperlinkedImageURL(source='get_absolute_url', read_only=True)
download_url = HyperlinkedDownloadURL(source='get_absolute_url', read_only=True)
owner = serializers.ReadOnlyField(source='owner.id')
images = ImageSerializer(many=True, source='basecollectionitem_set')
contributors = SerializedContributors(required=False)
owner_name = serializers.SerializerMethodField()
number_of_images = serializers.SerializerMethodField('num_im')
def num_im(self, obj):
return obj.basecollectionitem_set.count()
def get_owner_name(self, obj):
return obj.owner.username
def validate(self, data):
doi = strip(data.get('DOI'))
name = strip(data.get('name'))
if not self.instance:
if not (logical_xor(doi, name)):
raise serializers.ValidationError(
'Specify either "name" or "DOI"'
)
if doi:
try:
(name, authors,
paper_url, _, journal_name) = get_paper_properties(doi)
data['name'] = name
data['authors'] = authors
data['paper_url'] = paper_url
data['journal_name'] = journal_name
except:
raise serializers.ValidationError('Could not resolve DOI')
return data
class Meta:
model = Collection
exclude = ['private_token', 'images']
# Override `required` to allow name fetching by DOI
extra_kwargs = {'name': {'required': False}}
| mit |
mschmidt87/nest-simulator | pynest/examples/hh_phaseplane.py | 9 | 4973 | # -*- coding: utf-8 -*-
#
# hh_phaseplane.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
hh_phaseplane makes a numerical phase-plane analysis of the Hodgkin-Huxley
neuron (hh_psc_alpha). The dynamics is investigated in the V-n space (see
remark below). A constant DC current can be specified and its influence on
the nullclines can be studied.
REMARK
To make the two-dimensional analysis possible, the (four-dimensional)
Hodgkin-Huxley formalism needs to be artificially reduced to two dimensions,
in this case by 'clamping' the two other variables, m and h, to
constant values (m_eq and h_eq).
'''
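# Schematically, with m and h clamped to their equilibrium values m_eq and
# h_eq, the reduced two-dimensional system studied below is (illustrative
# notation only, not taken from the NEST model implementation):
#
#   C dV/dt = I_e - g_Na * m_eq**3 * h_eq * (V - E_Na)
#                 - g_K * n**4 * (V - E_K) - g_L * (V - E_L)
#     dn/dt = alpha_n(V) * (1 - n) - beta_n(V) * n
#
# so that only the membrane potential V and the potassium gating variable n
# remain as dynamical variables.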
import nest
from matplotlib import pyplot as plt
amplitude = 100. # Set externally applied current amplitude in pA
dt = 0.1 # simulation step length [ms]
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
nest.SetKernelStatus({'resolution': dt})
neuron = nest.Create('hh_psc_alpha')
# Numerically obtain equilibrium state
nest.Simulate(1000)
m_eq = nest.GetStatus(neuron)[0]['Act_m']
h_eq = nest.GetStatus(neuron)[0]['Act_h']
nest.SetStatus(neuron, {'I_e': amplitude}) # Apply external current
# Scan state space
print('Scanning phase space')
V_new_vec = []
n_new_vec = []
# x will contain the phase-plane data as a vector field
x = []
count = 0
for V in range(-100, 42, 2):
n_V = []
n_n = []
for n in range(10, 81):
# Set V_m and n
nest.SetStatus(neuron, {'V_m': V*1.0, 'Inact_n': n/100.0,
'Act_m': m_eq, 'Act_h': h_eq})
# Find state
V_m = nest.GetStatus(neuron)[0]['V_m']
Inact_n = nest.GetStatus(neuron)[0]['Inact_n']
# Simulate a short while
nest.Simulate(dt)
# Find difference between new state and old state
V_m_new = nest.GetStatus(neuron)[0]['V_m'] - V*1.0
Inact_n_new = nest.GetStatus(neuron)[0]['Inact_n'] - n/100.0
# Store in vector for later analysis
n_V.append(abs(V_m_new))
n_n.append(abs(Inact_n_new))
x.append([V_m, Inact_n, V_m_new, Inact_n_new])
if count % 10 == 0:
# Write updated state next to old state
print('')
print('Vm: \t', V_m)
print('new Vm:\t', V_m_new)
print('Inact_n:', Inact_n)
print('new Inact_n:', Inact_n_new)
count += 1
# Store in vector for later analysis
V_new_vec.append(n_V)
n_new_vec.append(n_n)
# Set state for AP generation
nest.SetStatus(neuron, {'V_m': -34., 'Inact_n': 0.2,
'Act_m': m_eq, 'Act_h': h_eq})
print('')
print('AP-trajectory')
# ap will contain the trace of a single action potential as one possible
# numerical solution in the vector field
ap = []
for i in range(1, 1001):
# Find state
V_m = nest.GetStatus(neuron)[0]['V_m']
Inact_n = nest.GetStatus(neuron)[0]['Inact_n']
if i % 10 == 0:
# Write new state next to old state
print('Vm: \t', V_m)
print('Inact_n:', Inact_n)
ap.append([V_m, Inact_n])
# Simulate again
nest.SetStatus(neuron, {'Act_m': m_eq, 'Act_h': h_eq})
nest.Simulate(dt)
# Make analysis
print('')
print('Plot analysis')
V_matrix = [list(x) for x in zip(*V_new_vec)]
n_matrix = [list(x) for x in zip(*n_new_vec)]
n_vec = [x/100. for x in range(10, 81)]
V_vec = [x*1. for x in range(-100, 42, 2)]
nullcline_V = []
nullcline_n = []
print('Searching nullclines')
for i in range(0, len(V_vec)):
index = V_matrix[:][i].index(min(V_matrix[:][i]))
if index != 0 and index != len(n_vec):
nullcline_V.append([V_vec[i], n_vec[index]])
index = n_matrix[:][i].index(min(n_matrix[:][i]))
if index != 0 and index != len(n_vec):
nullcline_n.append([V_vec[i], n_vec[index]])
print('Plotting vector field')
factor = 0.1
for i in range(0, count, 3):
plt.plot([x[i][0], x[i][0] + factor*x[i][2]],
[x[i][1], x[i][1] + factor*x[i][3]], color=[0.6, 0.6, 0.6])
# nullcline_V and nullcline_n hold [V, n] pairs; unpack them for plotting
plt.plot([p[0] for p in nullcline_V], [p[1] for p in nullcline_V], linewidth=2.0)
plt.plot([p[0] for p in nullcline_n], [p[1] for p in nullcline_n], linewidth=2.0)
plt.xlim([V_vec[0], V_vec[-1]])
plt.ylim([n_vec[0], n_vec[-1]])
plt.plot([p[0] for p in ap], [p[1] for p in ap], color='black', linewidth=1.0)
plt.xlabel('Membrane potential V [mV]')
plt.ylabel('Inactivation variable n')
plt.title('Phase space of the Hodgkin-Huxley Neuron')
plt.show()
| gpl-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/manifold/t_sne.py | 3 | 35089 | # Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..externals.six import string_types
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
    distances : array, shape (n_samples, n_samples)
        Square matrix of pairwise distances between the samples (squared
        Euclidean distances in the default setting of :class:`TSNE`).
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
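# Illustrative sketch (the helper below and its name are not part of the
# original module): the exact method of TSNE feeds a full matrix of squared
# Euclidean distances to _joint_probabilities and receives a condensed
# probability vector back.
def _example_joint_probabilities(X, perplexity=30.0):
    distances = pairwise_distances(X, squared=True)
    P = _joint_probabilities(distances, perplexity, verbose=0)
    # expand the condensed vector back to a full (n_samples, n_samples) matrix
    return squareform(P)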
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
    This method is approximately equal to _joint_probabilities. The latter
    is O(N^2), but limiting the joint probability to nearest neighbors improves
    this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples, k)
Distances of samples to its k nearest neighbors.
neighbors : array, shape (n_samples, k)
Indices of the k nearest-neighbors for each samples.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : csr sparse matrix, shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
n_samples, k = neighbors.shape
distances = distances.astype(np.float32, copy=False)
neighbors = neighbors.astype(np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
assert np.all(np.isfinite(conditional_P)), \
"All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
range(0, n_samples * k + 1, k)),
shape=(n_samples, n_samples))
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s"
.format(duration))
return P
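# Illustrative sketch (the helper name and the neighbor-count heuristic are
# assumptions, not part of the original module): the sparse variant above is
# fed squared Euclidean distances to the k nearest neighbors of each sample.
def _example_joint_probabilities_nn(X, perplexity=30.0):
    # a common heuristic ties k to the perplexity; the exact factor is assumed
    k = min(X.shape[0] - 1, int(3. * perplexity + 1))
    knn = NearestNeighbors(n_neighbors=k).fit(X)
    # X=None returns neighbors of the training points, excluding each point
    distances_nn, neighbors_nn = knn.kneighbors(None, n_neighbors=k)
    return _joint_probabilities_nn(distances_nn ** 2, neighbors_nn,
                                   perplexity, verbose=0)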
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist /= degrees_of_freedom
dist += 1.
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
# Gradient: dC/dY
    # pdist always returns double precision distances. Thus we need to
    # allocate the gradient explicitly with the dtype of the embedding params.
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order='K'),
X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
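# Illustrative sketch (helper name is hypothetical): evaluating the exact
# objective once for a small random 2-D embedding; one degree of freedom is
# assumed here for the Student's t-distribution of a 2-D embedding.
def _example_kl_divergence(X, perplexity=30.0, random_state=0):
    n_samples = X.shape[0]
    P = _joint_probabilities(pairwise_distances(X, squared=True),
                             perplexity, verbose=0)
    rng = check_random_state(random_state)
    # small random initial embedding, unraveled into a flat parameter vector
    params = 1e-4 * rng.randn(n_samples, 2).ravel()
    return _kl_divergence(params, P, degrees_of_freedom=1.0,
                          n_samples=n_samples, n_components=2)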
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
angle=0.5, skip_num_points=0, verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : csr sparse matrix, shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
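# Illustrative sketch (helper name is hypothetical): the optimizer above is
# generic -- it only needs an objective that returns (error, gradient) -- so
# it can be exercised on a simple convex quadratic.
def _example_gradient_descent():
    def quadratic(p):
        # error and gradient of sum(p ** 2)
        return np.sum(p ** 2), 2.0 * p
    p0 = np.array([3.0, -4.0])
    p, error, n_iter_done = _gradient_descent(quadratic, p0, it=0, n_iter=250,
                                              learning_rate=0.1, momentum=0.5)
    return p, error, n_iter_done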
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
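# Illustrative sketch (helper name is hypothetical): any low-dimensional
# embedding can be scored with the metric above; PCA is used here only to
# keep the sketch fast.
def _example_trustworthiness(random_state=0):
    rng = check_random_state(random_state)
    X = rng.randn(50, 10)
    X_embedded = PCA(n_components=2).fit_transform(X)
    return trustworthiness(X, X_embedded, n_neighbors=5)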
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 12.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater than 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2).fit_transform(X)
>>> X_embedded.shape
(4, 2)
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
n_iter_without_progress=300, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise to use a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that
            when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
if np.any(X < 0):
raise ValueError("All distances should be positive, the "
"precomputed distances given as X is not "
"correct")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float32, np.float64])
if self.method == 'barnes_hut' and self.n_components > 3:
raise ValueError("'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree.")
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is {}"
.format(self.early_exaggeration))
if self.n_iter < 250:
raise ValueError("n_iter should be at least 250")
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if np.any(distances < 0):
raise ValueError("All distances should be positive, the "
"metric given is not correct")
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "than or equal to one")
else:
            # Compute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
k = min(n_samples - 1, int(3. * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(k))
# Find the nearest neighbors for every point
knn = NearestNeighbors(algorithm='auto', n_neighbors=k,
metric=self.metric)
t0 = time()
knn.fit(X)
duration = time() - t0
if self.verbose:
print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration))
t0 = time()
distances_nn, neighbors_nn = knn.kneighbors(
None, n_neighbors=k)
duration = time() - t0
if self.verbose:
print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
.format(n_samples, duration))
# Free the memory used by the ball_tree
del knn
if self.metric == "euclidean":
                # knn returns the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # method was derived using the euclidean method as in the
# input space. Not sure of the implication of using a different
# metric.
distances_nn **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, neighbors_nn,
self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
elif self.init == 'random':
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.randn(
n_samples, self.n_components).astype(np.float32)
else:
raise ValueError("'init' must be 'pca', 'random', or "
"a numpy array")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
return self._tsne(P, degrees_of_freedom, n_samples,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, X_embedded,
neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
        # Learning schedule (part 1): do 250 iterations with lower momentum but
        # higher learning rate controlled via the early exaggeration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
y : Ignored.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
y : Ignored.
"""
self.fit_transform(X)
return self
| mit |
TsinghuaX/ease | ease/util_functions.py | 3 | 16740 | #Collection of misc functions needed to support essay_set.py and feature_extractor.py.
#Requires aspell to be installed and added to the path
from fisher import pvalue
aspell_path = "aspell"
import re
import os
from sklearn.feature_extraction.text import CountVectorizer
import numpy
from itertools import chain
import math
import nltk
import pickle
import logging
import sys
import tempfile
log=logging.getLogger(__name__)
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
if not base_path.endswith("/"):
base_path=base_path+"/"
#Paths to needed data files
ESSAY_CORPUS_PATH = base_path + "data/essaycorpus.txt"
ESSAY_COR_TOKENS_PATH = base_path + "data/essay_cor_tokens.p"
class AlgorithmTypes(object):
"""
Defines what types of algorithm can be used
"""
regression = "regression"
classification = "classifiction"
def create_model_path(model_path):
"""
Creates a path to model files
model_path - string
"""
if not model_path.startswith("/") and not model_path.startswith("models/"):
model_path="/" + model_path
if not model_path.startswith("models"):
model_path = "models" + model_path
if not model_path.endswith(".p"):
model_path+=".p"
return model_path
def sub_chars(string):
"""
Strips illegal characters from a string. Used to sanitize input essays.
    Removes every character that is not a letter or one of the allowed punctuation marks (.?!,';:).
Returns sanitized string.
string - string
"""
#Define replacement patterns
sub_pat = r"[^A-Za-z\.\?!,';:]"
char_pat = r"\."
com_pat = r","
ques_pat = r"\?"
excl_pat = r"!"
sem_pat = r";"
col_pat = r":"
whitespace_pat = r"\s{1,}"
#Replace text. Ordering is very important!
nstring = re.sub(sub_pat, " ", string)
nstring = re.sub(char_pat," .", nstring)
nstring = re.sub(com_pat, " ,", nstring)
nstring = re.sub(ques_pat, " ?", nstring)
nstring = re.sub(excl_pat, " !", nstring)
nstring = re.sub(sem_pat, " ;", nstring)
nstring = re.sub(col_pat, " :", nstring)
nstring = re.sub(whitespace_pat, " ", nstring)
return nstring
def spell_correct(string):
"""
Uses aspell to spell correct an input string.
Requires aspell to be installed and added to the path.
Returns the spell corrected string if aspell is found, original string if not.
string - string
"""
# Create a temp file so that aspell could be used
# By default, tempfile will delete this file when the file handle is closed.
f = tempfile.NamedTemporaryFile(mode='w')
f.write(string)
f.flush()
f_path = os.path.abspath(f.name)
try:
p = os.popen(aspell_path + " -a < " + f_path + " --sug-mode=ultra")
# Aspell returns a list of incorrect words with the above flags
incorrect = p.readlines()
p.close()
except Exception:
log.exception("aspell process failed; could not spell check")
# Return original string if aspell fails
return string,0, string
finally:
f.close()
incorrect_words = list()
correct_spelling = list()
for i in range(1, len(incorrect)):
if(len(incorrect[i]) > 10):
#Reformat aspell output to make sense
match = re.search(":", incorrect[i])
if hasattr(match, "start"):
begstring = incorrect[i][2:match.start()]
begmatch = re.search(" ", begstring)
begword = begstring[0:begmatch.start()]
sugstring = incorrect[i][match.start() + 2:]
sugmatch = re.search(",", sugstring)
if hasattr(sugmatch, "start"):
sug = sugstring[0:sugmatch.start()]
incorrect_words.append(begword)
correct_spelling.append(sug)
#Create markup based on spelling errors
newstring = string
markup_string = string
already_subbed=[]
for i in range(0, len(incorrect_words)):
sub_pat = r"\b" + incorrect_words[i] + r"\b"
sub_comp = re.compile(sub_pat)
newstring = re.sub(sub_comp, correct_spelling[i], newstring)
if incorrect_words[i] not in already_subbed:
markup_string=re.sub(sub_comp,'<bs>' + incorrect_words[i] + "</bs>", markup_string)
already_subbed.append(incorrect_words[i])
return newstring,len(incorrect_words),markup_string
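# Illustrative call (added for clarity; assumes aspell is installed and on the
# PATH, and the exact corrections depend on the local aspell dictionary):
#   corrected, n_errors, markup = spell_correct("Ths is a tst")
# `corrected` holds the corrected text, `n_errors` the number of misspelled
# words found, and `markup` wraps each misspelled word in <bs>...</bs> tags.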
def ngrams(tokens, min_n, max_n):
"""
Generates ngrams(word sequences of fixed length) from an input token sequence.
tokens is a list of words.
min_n is the minimum length of an ngram to return.
max_n is the maximum length of an ngram to return.
returns a list of ngrams (words separated by a space)
"""
all_ngrams = list()
n_tokens = len(tokens)
for i in xrange(n_tokens):
for j in xrange(i + min_n, min(n_tokens, i + max_n) + 1):
all_ngrams.append(" ".join(tokens[i:j]))
return all_ngrams
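# Illustrative behaviour (added for clarity, not part of the original module):
#   ngrams(["the", "cat", "sat"], 1, 2)
#   -> ['the', 'the cat', 'cat', 'cat sat', 'sat']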
def f7(seq):
"""
Makes a list unique
"""
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
def count_list(the_list):
"""
Generates a count of the number of times each unique item appears in a list
"""
count = the_list.count
result = [(item, count(item)) for item in set(the_list)]
result.sort()
return result
def regenerate_good_tokens(string):
"""
Given an input string, part of speech tags the string, then generates a list of
ngrams that appear in the string.
Used to define grammatically correct part of speech tag sequences.
Returns a list of part of speech tag sequences.
"""
toks = nltk.word_tokenize(string)
pos_string = nltk.pos_tag(toks)
pos_seq = [tag[1] for tag in pos_string]
pos_ngrams = ngrams(pos_seq, 2, 4)
sel_pos_ngrams = f7(pos_ngrams)
return sel_pos_ngrams
def get_vocab(text, score, max_feats=750, max_feats2=200):
"""
Uses a fisher test to find words that are significant in that they separate
high scoring essays from low scoring essays.
text is a list of input essays.
score is a list of scores, with score[n] corresponding to text[n]
max_feats is the maximum number of features to consider in the first pass
max_feats2 is the maximum number of features to consider in the second (final) pass
Returns a list of words that constitute the significant vocabulary
"""
dict = CountVectorizer(ngram_range=(1,2), max_features=max_feats)
dict_mat = dict.fit_transform(text)
set_score = numpy.asarray(score, dtype=numpy.int)
med_score = numpy.median(set_score)
new_score = set_score
if(med_score == 0):
med_score = 1
new_score[set_score < med_score] = 0
new_score[set_score >= med_score] = 1
fish_vals = []
for col_num in range(0, dict_mat.shape[1]):
loop_vec = dict_mat.getcol(col_num).toarray()
good_loop_vec = loop_vec[new_score == 1]
bad_loop_vec = loop_vec[new_score == 0]
good_loop_present = len(good_loop_vec[good_loop_vec > 0])
good_loop_missing = len(good_loop_vec[good_loop_vec == 0])
bad_loop_present = len(bad_loop_vec[bad_loop_vec > 0])
bad_loop_missing = len(bad_loop_vec[bad_loop_vec == 0])
fish_val = pvalue(good_loop_present, bad_loop_present, good_loop_missing, bad_loop_missing).two_tail
fish_vals.append(fish_val)
cutoff = 1
if(len(fish_vals) > max_feats2):
cutoff = sorted(fish_vals)[max_feats2]
good_cols = numpy.asarray([num for num in range(0, dict_mat.shape[1]) if fish_vals[num] <= cutoff])
getVar = lambda searchList, ind: [searchList[i] for i in ind]
vocab = getVar(dict.get_feature_names(), good_cols)
return vocab
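# Illustrative call (added for clarity; hypothetical data): given parallel
# lists of essay texts and integer scores, e.g.
#   vocab = get_vocab(["first essay text ...", "second essay text ..."], [5, 1])
# the returned `vocab` holds the unigrams/bigrams whose Fisher-test p-values
# best separate above-median from below-median essays.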
def edit_distance(s1, s2):
"""
Calculates string edit distance between string 1 and string 2.
Deletion, insertion, substitution, and transposition all increase edit distance.
"""
d = {}
lenstr1 = len(s1)
lenstr2 = len(s2)
for i in xrange(-1, lenstr1 + 1):
d[(i, -1)] = i + 1
for j in xrange(-1, lenstr2 + 1):
d[(-1, j)] = j + 1
for i in xrange(lenstr1):
for j in xrange(lenstr2):
if s1[i] == s2[j]:
cost = 0
else:
cost = 1
d[(i, j)] = min(
d[(i - 1, j)] + 1, # deletion
d[(i, j - 1)] + 1, # insertion
d[(i - 1, j - 1)] + cost, # substitution
)
if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:
d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost) # transposition
return d[lenstr1 - 1, lenstr2 - 1]
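# Illustrative behaviour (added for clarity, not part of the original module):
# a single transposition counts as one edit, e.g.
#   edit_distance("ab", "ba") -> 1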
class Error(Exception):
pass
class InputError(Error):
def __init__(self, expr, msg):
self.expr = expr
self.msg = msg
def gen_cv_preds(clf, arr, sel_score, num_chunks=3):
"""
Generates cross validated predictions using an input classifier and data.
    clf is a classifier that implements the fit and predict methods.
arr is the input data array (X)
sel_score is the target list (y). y[n] corresponds to X[n,:]
num_chunks is the number of cross validation folds to use
Returns an array of the predictions where prediction[n] corresponds to X[n,:]
"""
cv_len = int(math.floor(len(sel_score) / num_chunks))
chunks = []
for i in range(0, num_chunks):
range_min = i * cv_len
range_max = ((i + 1) * cv_len)
if i == num_chunks - 1:
range_max = len(sel_score)
chunks.append(range(range_min, range_max))
preds = []
set_score = numpy.asarray(sel_score, dtype=numpy.int)
chunk_vec = numpy.asarray(range(0, len(chunks)))
for i in xrange(0, len(chunks)):
loop_inds = list(
chain.from_iterable([chunks[int(z)] for z, m in enumerate(range(0, len(chunks))) if int(z) != i]))
sim_fit = clf.fit(arr[loop_inds], set_score[loop_inds])
preds.append(list(sim_fit.predict(arr[chunks[i]])))
all_preds = list(chain(*preds))
return(all_preds)
def gen_model(clf, arr, sel_score):
"""
Fits a classifier to data and a target score
clf is an input classifier that implements the fit method.
arr is a data array(X)
sel_score is the target list (y) where y[n] corresponds to X[n,:]
sim_fit is not a useful return value. Instead the clf is the useful output.
"""
set_score = numpy.asarray(sel_score, dtype=numpy.int)
sim_fit = clf.fit(arr, set_score)
return(sim_fit)
def gen_preds(clf, arr):
"""
Generates predictions on a novel data array using a fit classifier
clf is a classifier that has already been fit
arr is a data array identical in dimension to the array clf was trained on
Returns the array of predictions.
"""
if(hasattr(clf, "predict_proba")):
ret = clf.predict(arr)
# pred_score=preds.argmax(1)+min(x._score)
else:
ret = clf.predict(arr)
return ret
def calc_list_average(l):
"""
Calculates the average value of a list of numbers
Returns a float
"""
total = 0.0
for value in l:
total += value
return total / len(l)
stdev = lambda d: (sum((x - 1. * sum(d) / len(d)) ** 2 for x in d) / (1. * (len(d) - 1))) ** .5
def quadratic_weighted_kappa(rater_a, rater_b, min_rating=None, max_rating=None):
"""
Calculates kappa correlation between rater_a and rater_b.
Kappa measures how well 2 quantities vary together.
rater_a is a list of rater a scores
rater_b is a list of rater b scores
min_rating is an optional argument describing the minimum rating possible on the data set
max_rating is an optional argument describing the maximum rating possible on the data set
Returns a float corresponding to the kappa correlation
"""
assert(len(rater_a) == len(rater_b))
rater_a = [int(a) for a in rater_a]
rater_b = [int(b) for b in rater_b]
if min_rating is None:
min_rating = min(rater_a + rater_b)
if max_rating is None:
max_rating = max(rater_a + rater_b)
conf_mat = confusion_matrix(rater_a, rater_b,
min_rating, max_rating)
num_ratings = len(conf_mat)
num_scored_items = float(len(rater_a))
hist_rater_a = histogram(rater_a, min_rating, max_rating)
hist_rater_b = histogram(rater_b, min_rating, max_rating)
numerator = 0.0
denominator = 0.0
if(num_ratings > 1):
for i in range(num_ratings):
for j in range(num_ratings):
expected_count = (hist_rater_a[i] * hist_rater_b[j]
/ num_scored_items)
d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
numerator += d * conf_mat[i][j] / num_scored_items
denominator += d * expected_count / num_scored_items
return 1.0 - numerator / denominator
else:
return 1.0
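# Illustrative behaviour (added for clarity, not part of the original module):
# identical rating lists give perfect agreement,
#   quadratic_weighted_kappa([1, 2, 3], [1, 2, 3]) -> 1.0
# lower values indicate weaker agreement, and the statistic can become
# negative for systematic disagreement.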
def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):
"""
Generates a confusion matrix between rater_a and rater_b
A confusion matrix shows how often 2 values agree and disagree
See quadratic_weighted_kappa for argument descriptions
"""
assert(len(rater_a) == len(rater_b))
rater_a = [int(a) for a in rater_a]
rater_b = [int(b) for b in rater_b]
    if min_rating is None:
        min_rating = min(rater_a)
    if max_rating is None:
        max_rating = max(rater_a)
    min_rating = int(min_rating)
    max_rating = int(max_rating)
num_ratings = int(max_rating - min_rating + 1)
conf_mat = [[0 for i in range(num_ratings)]
for j in range(num_ratings)]
for a, b in zip(rater_a, rater_b):
conf_mat[int(a - min_rating)][int(b - min_rating)] += 1
return conf_mat
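# Illustrative behaviour (added for clarity, not part of the original module):
#   confusion_matrix([1, 2], [1, 1], min_rating=1, max_rating=2)
#   -> [[1, 0], [1, 0]]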
def histogram(ratings, min_rating=None, max_rating=None):
"""
Generates a frequency count of each rating on the scale
ratings is a list of scores
Returns a list of frequencies
"""
ratings = [int(r) for r in ratings]
if min_rating is None:
min_rating = min(ratings)
if max_rating is None:
max_rating = max(ratings)
num_ratings = int(max_rating - min_rating + 1)
hist_ratings = [0 for x in range(num_ratings)]
for r in ratings:
hist_ratings[r - min_rating] += 1
return hist_ratings
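# Illustrative behaviour (added for clarity, not part of the original module):
#   histogram([1, 1, 3], min_rating=1, max_rating=3) -> [2, 0, 1]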
def get_wordnet_syns(word):
"""
Utilize wordnet (installed with nltk) to get synonyms for words
word is the input word
returns a list of unique synonyms
"""
synonyms = []
regex = r"_"
pat = re.compile(regex)
synset = nltk.wordnet.wordnet.synsets(word)
for ss in synset:
for swords in ss.lemma_names:
synonyms.append(pat.sub(" ", swords.lower()))
synonyms = f7(synonyms)
return synonyms
def get_separator_words(toks1):
"""
Finds the words that separate a list of tokens from a background corpus
Basically this generates a list of informative/interesting words in a set
toks1 is a list of words
Returns a list of separator words
"""
tab_toks1 = nltk.FreqDist(word.lower() for word in toks1)
if(os.path.isfile(ESSAY_COR_TOKENS_PATH)):
toks2 = pickle.load(open(ESSAY_COR_TOKENS_PATH, 'rb'))
else:
essay_corpus = open(ESSAY_CORPUS_PATH).read()
essay_corpus = sub_chars(essay_corpus)
toks2 = nltk.FreqDist(word.lower() for word in nltk.word_tokenize(essay_corpus))
pickle.dump(toks2, open(ESSAY_COR_TOKENS_PATH, 'wb'))
sep_words = []
for word in tab_toks1.keys():
tok1_present = tab_toks1[word]
if(tok1_present > 2):
tok1_total = tab_toks1._N
tok2_present = toks2[word]
tok2_total = toks2._N
fish_val = pvalue(tok1_present, tok2_present, tok1_total, tok2_total).two_tail
if(fish_val < .001 and tok1_present / float(tok1_total) > (tok2_present / float(tok2_total)) * 2):
sep_words.append(word)
sep_words = [w for w in sep_words if not w in nltk.corpus.stopwords.words("english") and len(w) > 5]
return sep_words
def encode_plus(s):
"""
Literally encodes the plus sign
input is a string
returns the string with plus signs encoded
"""
regex = r"\+"
pat = re.compile(regex)
return pat.sub("%2B", s)
def getMedian(numericValues):
"""
Gets the median of a list of values
Returns a float/int
"""
theValues = sorted(numericValues)
if len(theValues) % 2 == 1:
return theValues[(len(theValues) + 1) / 2 - 1]
else:
lower = theValues[len(theValues) / 2 - 1]
upper = theValues[len(theValues) / 2]
return (float(lower + upper)) / 2
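# Illustrative behaviour (added for clarity; note the even-length branch relies
# on Python 2 integer division for the indices):
#   getMedian([3, 1, 2])    -> 2
#   getMedian([1, 2, 3, 4]) -> 2.5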
| agpl-3.0 |
trichter/sito | bin/console/plotspec.py | 1 | 2764 | #!/usr/bin/env python
# by TR
import argparse
from obspy.core import UTCDateTime as UTC
import logging
from obspy.core.utcdatetime import UTCDateTime
logging.basicConfig()
parser = argparse.ArgumentParser(description='Plot waveform data from a file, or for a given station and date.')
parser.add_argument('file_station',
help='file to plot or station to plot')
parser.add_argument('date', nargs='?', default=None, type=UTC,
help='if first argument is station: date')
parser.add_argument('-a', '--absolute-scale', type=float, default=0.0005,
help='display with different scale, default: 0.0005')
parser.add_argument('-r', '--relative-scale', type=float,
help='display with different relative scale - '
'overwrites ABSOLUTE_SCALE')
parser.add_argument('-s', '--save',
help='save plot to this file instead of showing')
parser.add_argument('-x', '--xcorr-append',
help='dont plot raw data and pass this argument to Data object')
parser.add_argument('-c', '--component', default='Z',
help='component to plot, default: Z')
parser.add_argument('-d', '--downsample', default=1,
help='downsample to this sampling rate, default: 1')
parser.add_argument('-o', '--options',
help='dictionary with kwargs passed to plotday')
args = parser.parse_args()
if args.relative_scale is not None:
args.absolute_scale = None
if args.options is None:
kwargs = {}
else:
kwargs = eval('dict(%s)' % args.options)
kwargs.update(dict(absolutescale=args.absolute_scale,
scale=args.relative_scale,
downsample=args.downsample,
save=args.save, show=args.save is None))
print kwargs
if args.date is None:
from sito import read
from sito.imaging import plotTrace
stream = read(args.file_station)
plotTrace(stream, **kwargs)
else:
from sito.imaging import plotTrace2
station = args.file_station
if station.startswith('PB') or station == 'LVC':
from sito.data import IPOC
data = IPOC(xcorr_append=args.xcorr_append)
elif station == 'PKD':
from sito.data import Parkfield
data = Parkfield(xcorr_append=args.xcorr_append)
else:
        raise ValueError('Not a valid station name')
day = UTCDateTime(args.date)
if args.xcorr_append is None:
stream = data.getRawStream(day, station, component=args.component)
else:
stream = data.getStream(day, station, component=args.component)
if stream[0].stats.is_fft:
stream.ifft()
plotTrace(stream, component=args.component, **kwargs)
if args.save is None:
from matplotlib.pyplot import show
show()
| mit |
rollend/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
def setData(self,index,value, role=Qt.EditRole):
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
def deleteRow(self, index):
idx = self.df.index[index]
#self.beginRemoveRows(QModelIndex(), index,index)
#self.df = self.df.drop(idx,axis=0)
#self.endRemoveRows()
#self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
print idx.row()
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
self.dataModel.columnFormat[colName]=f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
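# Minimal usage sketch (added for clarity; hypothetical, requires a running
# QApplication -- see the stand-alone test code below):
#   widget = DataFrameWidget()
#   widget.setDataFrame(testDf())
#   widget.setFormat({'float': '%.2f'})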
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
print 'test function'
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
srinathv/bokeh | examples/plotting/server/boxplot.py | 42 | 2372 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_server
# Generate some synthetic time series for six different categories
cats = list("abcdef")
data = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
data[g == l] += i // 2
df = pd.DataFrame(dict(score=data, group=g))
# Find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
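# Worked example (added for clarity, hypothetical numbers): if a category had
# q1 = -0.5 and q3 = 0.5, then iqr = 1.0 and the fences above would be
# upper = 2.0 and lower = -2.0; points outside [lower, upper] count as outliers.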
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting: we need a coordinate for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
output_server('boxplot')
p = figure(tools="previewsave", background_fill="#EFE8E2", title="", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# stems
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")
# boxes
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
fill_color="#3B8686", line_width=2, line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# outliers
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.ygrid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
show(p)
| bsd-3-clause |
oscarlazoarjona/quantum_memories | examples/hyperfine_orca/doppler_dephasing/plot_data_rb.py | 1 | 14036 | # -*- coding: utf-8 -*-
# Copyright (C) 2017 Oscar Gerardo Lazo Arjona
# mailto: [email protected]
r"""This is a template."""
import numpy as np
from scipy.optimize import curve_fit
import pandas as pd
from matplotlib import pyplot as plt
from scipy.constants import physical_constants
from tabulate import tabulate
from fast import State, Transition, Integer
from fast import all_atoms
from math import pi, sqrt
def gaussian_formula(t, amp, sigma):
"""Return points on a gaussian with the given amplitude and dephasing."""
return amp*np.exp(-(t/sigma)**2)
def simple_formula(t, amp, gamma, sigma, Delta):
"""Return points on the modeled simple efficiency."""
return amp*np.exp(-gamma*t - (Delta*sigma*t)**2)
def hyperfine_formula(t, amp, gamma, sigma, Delta, omega87, omega97, omega107,
A, B, C, D, phib, phic, phid):
"""Return points on the modeled hyperfine efficiency."""
eta = amp*np.exp(-gamma*t - (Delta*sigma*t)**2)
eta = eta*abs(A +
B*np.exp(1j*omega87*t+1j*phib) +
C*np.exp(1j*omega97*t+1j*phic) +
D*np.exp(1j*omega107*t+1j*phid))**2
# eta = eta/abs(A+B*np.exp(1j*phib)+C*np.exp(1j*phic))**2
return eta
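# For reference, the expression implemented above is
#   eta(t) = amp * exp(-gamma*t - (Delta*sigma*t)**2)
#            * |A + B*exp(i*(omega87*t + phib))
#                 + C*exp(i*(omega97*t + phic))
#                 + D*exp(i*(omega107*t + phid))|**2
# i.e. a decay/Doppler envelope multiplied by a beat pattern between the
# hyperfine ground-state coherences.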
def get_model(gamma, sigma, Delta, omega87, omega97, omega107, fit_gamma=None):
r"""Get a model to fit."""
def f(t, amp, A, B, C, D, phib, phic, phid):
return hyperfine_formula(t, amp, gamma, sigma, Delta,
omega87, omega97, omega107,
A, B, C, D, phib, phic, phid)
    def g(t, gamma):
        # Keep the signature in sync with hyperfine_formula defined above.
        return hyperfine_formula(t, amp, gamma, sigma, Delta,
                                 omega87, omega97, omega107,
                                 A, B, C, D, phib, phic, phid)
if fit_gamma is not None:
        amp, A, B, C, D, phib, phic, phid = fit_gamma
return g
else:
return f
tdelay_exp = [5.1788101408450705e-09, 9.9592504225352128e-09,
1.5337245633802818e-08, 2.011768591549296e-08,
2.529649605633803e-08, 3.0276121126760566e-08,
3.5255746478873246e-08, 4.0434557746478886e-08,
4.52149971830986e-08, 5.0393808450704239e-08,
5.517424788732396e-08, 6.015387323943664e-08,
6.5315278873239461e-08, 7.0511492957746506e-08,
7.529193239436622e-08, 8.0072374647887346e-08,
8.5649554929577499e-08, 9.0629180281690165e-08,
9.5210433802816913e-08, 1.0019005915492959e-07,
1.0516968450704226e-07, 1.1054767887323945e-07,
1.1532812112676057e-07, 1.2050692957746481e-07,
1.2508818591549298e-07, 1.3026699718309861e-07,
1.3564499154929577e-07, 1.404254309859155e-07,
1.4580342535211266e-07, 1.5018549577464785e-07,
1.5516512112676056e-07, 1.6034393239436617e-07,
1.6532355774647887e-07, 1.7010399718309857e-07,
1.7528280845070421e-07, 1.8046161690140844e-07,
1.8544124225352112e-07, 1.904208676056338e-07,
1.9520130985915493e-07, 2.0018093521126761e-07]
eta_exp = [0.20500400458756521, 0.06670728759622839,
0.030760132618571991, 0.053227104479607261,
0.10964417892068903, 0.16855758121498934,
0.20500400458756521, 0.16056932219972492,
0.045738113859262283, 0.021773329752779624,
0.055224170998595869, 0.089673534912872471,
0.10989380958780416, 0.10614931427763159,
0.074196285277262727, 0.03675132511484798,
0.011788032461283361, 0.023770375089700164,
0.045238845464342668, 0.049232978502319565,
0.038748384573147235, 0.022771852421239813,
0.0092916551832402938, 0.0037997805067081221,
0.0042990418409383779, 0.0097909165174705493,
0.012287293795513616, 0.010290248458592754,
0.0047983031751684729, -0.0006936421082558077,
-0.0006936421082558077, -0.0016921647767161587,
-0.0006936421082558077, 0.00030488056020454339,
0.00030488056020454339, -0.00019438077402571241,
-0.00069364210825596827, -0.00069364210825596827,
-0.00019438077402571241, -0.00019438077402571241]
tdelay_hyp = np.linspace(5e-9, 200e-9, 40*5)
eta_hyp = [6.49292859e-02, 5.07382837e-02, 3.72369196e-02, 2.53027370e-02,
1.55529475e-02, 8.30006291e-03, 3.55003301e-03, 1.04036579e-03,
3.10074579e-04, 7.89166056e-04, 1.89333699e-03, 3.10977792e-03,
4.06238220e-03, 4.54870105e-03, 4.54593917e-03, 4.18832154e-03,
3.72243106e-03, 3.44999435e-03, 3.66867785e-03, 4.62069832e-03,
6.45670720e-03, 9.21901639e-03, 1.28443964e-02, 1.71832101e-02,
2.20290340e-02, 2.71516926e-02, 3.23268102e-02, 3.73565539e-02,
4.20787257e-02, 4.63642798e-02, 5.01060320e-02, 5.32033427e-02,
5.55483620e-02, 5.70190292e-02, 5.74823940e-02, 5.68093679e-02,
5.48992035e-02, 5.17094189e-02, 4.72850647e-02, 4.17805480e-02,
3.54679462e-02, 2.87276684e-02, 2.20202379e-02, 1.58413587e-02,
1.06656516e-02, 6.88691070e-03, 4.76401366e-03, 4.38138522e-03,
5.63111059e-03, 8.22079292e-03, 1.17073629e-02, 1.55530074e-02,
1.91958079e-02, 2.21251704e-02, 2.39511342e-02, 2.44573284e-02,
2.36296432e-02, 2.16562276e-02, 1.88986884e-02, 1.58386209e-02,
1.30073039e-02, 1.09087957e-02, 9.94758243e-03, 1.03710851e-02,
1.22350238e-02, 1.53961088e-02, 1.95324368e-02, 2.41879746e-02,
2.88341175e-02, 3.29391083e-02, 3.60353805e-02, 3.77756391e-02,
3.79705464e-02, 3.66039907e-02, 3.38252781e-02, 2.99210201e-02,
2.52720957e-02, 2.03028565e-02, 1.54301327e-02, 1.10189900e-02,
7.35039491e-03, 4.60370389e-03, 2.85424203e-03, 2.08401199e-03,
2.20173021e-03, 3.06754052e-03, 4.51771176e-03, 6.38538753e-03,
8.51492118e-03, 1.07689175e-02, 1.30287872e-02, 1.51908811e-02,
1.71609059e-02, 1.88494323e-02, 2.01706591e-02, 2.10454367e-02,
2.14086698e-02, 2.12194420e-02, 2.04712494e-02, 1.92008297e-02,
1.74913992e-02, 1.54699917e-02, 1.32963217e-02, 1.11477068e-02,
9.19750740e-03, 7.59242269e-03, 6.43395894e-03, 5.76328135e-03,
5.55555984e-03, 5.72353213e-03, 6.13091440e-03, 6.61324900e-03,
7.00367668e-03, 7.15935278e-03, 6.98495574e-03, 6.44883567e-03,
5.58976788e-03, 4.51253655e-03, 3.37400509e-03, 2.36019286e-03,
1.65921138e-03, 1.43186721e-03, 1.78838297e-03, 2.76969271e-03,
4.33970213e-03, 6.38838984e-03, 8.74345561e-03, 1.11922786e-02,
1.35081129e-02, 1.54779625e-02, 1.69265503e-02, 1.77354298e-02,
1.78554245e-02, 1.73068547e-02, 1.61731057e-02, 1.45864330e-02,
1.27081045e-02, 1.07073932e-02, 8.74127105e-03, 6.94005568e-03,
5.39375909e-03, 4.14869024e-03, 3.21185551e-03, 2.55423908e-03,
2.12288703e-03, 1.85408537e-03, 1.68291960e-03, 1.55274907e-03,
1.42288155e-03, 1.26964567e-03, 1.08549464e-03, 8.77514561e-04,
6.60852869e-04, 4.54475130e-04, 2.77375984e-04, 1.44878833e-04,
6.81641395e-05, 5.43469430e-05, 1.07840697e-04, 2.32490793e-04,
4.32669142e-04, 7.14327428e-04, 1.08354963e-03, 1.54523025e-03,
2.10027580e-03, 2.74191594e-03, 3.45408943e-03, 4.20891200e-03,
4.96746908e-03, 5.68289904e-03, 6.30260816e-03, 6.77549352e-03,
7.05760016e-03, 7.11694071e-03, 6.93964817e-03, 6.53236487e-03,
5.92167971e-03, 5.15285733e-03, 4.28457926e-03, 3.38223460e-03,
2.51099788e-03, 1.72888114e-03, 1.08178247e-03, 5.93276280e-04,
2.72028526e-04, 1.06638665e-04, 7.04134086e-05, 1.26402824e-04,
2.33284988e-04, 3.51424036e-04, 4.48180762e-04, 5.01998112e-04,
5.09333495e-04, 4.63761824e-04, 3.84893007e-04, 2.94669021e-04,
2.18240891e-04, 1.79421482e-04, 1.96753482e-04, 2.80866472e-04]
tdelay_exp = np.array(tdelay_exp)
eta_exp = np.array(eta_exp)
eta_hyp = np.array(eta_hyp)*eta_exp[0]/eta_hyp[0]
###############################################################################
# We calculate the numbers for the lifetime formulas.
Rb85 = all_atoms[0]
Cs133 = all_atoms[2]
mRb = Rb85.mass
mCs = Cs133.mass
kB = physical_constants["Boltzmann constant"][0]
e1rb85 = State("Rb", 85, 5, 0, 1/Integer(2))
e2rb85 = State("Rb", 85, 5, 1, 3/Integer(2))
e3rb85 = State("Rb", 85, 5, 2, 5/Integer(2))
e1cs133 = State("Cs", 133, 6, 0, 1/Integer(2))
e2cs133 = State("Cs", 133, 6, 1, 3/Integer(2))
e3cs133 = State("Cs", 133, 6, 2, 5/Integer(2))
t1rb85 = Transition(e2rb85, e1rb85)
t2rb85 = Transition(e3rb85, e2rb85)
k1rb85 = 2*pi/t1rb85.wavelength
k2rb85 = 2*pi/t2rb85.wavelength
kmrb85 = abs(k2rb85-k1rb85)
kmrb85 = 0.000001
t1cs133 = Transition(e2cs133, e1cs133)
t2cs133 = Transition(e3cs133, e2cs133)
k1cs133 = 2*pi/t1cs133.wavelength
k2cs133 = 2*pi/t2cs133.wavelength
kmcs133 = abs(k2cs133-k1cs133)
# We calculate the Doppler and spontaneous decay lifetimes
T = 273.15+90
sigmarb = sqrt(kB*T/mRb)
sigmacs = sqrt(kB*T/mCs)
gamma32cs = t2cs133.einsteinA
gamma32rb = t2rb85.einsteinA
taucs = (-gamma32cs+sqrt(4*(kmcs133*sigmacs)**2 +
gamma32cs**2))/2/(kmcs133*sigmacs)**2
taurb = (-gamma32rb+sqrt(4*(kmrb85*sigmarb)**2 +
gamma32rb**2))/2/(kmrb85*sigmarb)**2
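# The 1/e lifetime expressions above follow from solving
#   exp(-gamma_32*t - (Deltak*sigma_v*t)**2) = exp(-1)
# for t, i.e. (Deltak*sigma_v)**2*t**2 + gamma_32*t - 1 = 0, whose positive
# root is t = (-gamma_32 + sqrt(gamma_32**2 + 4*(Deltak*sigma_v)**2))
#             / (2*(Deltak*sigma_v)**2).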
print "A few properties of our alkalis:"
table = [["85Rb", "133Cs"],
["m", mRb, mCs],
["Deltak", kmrb85, kmcs133],
["sigma_v", sigmarb, sigmacs],
["gamma_32", gamma32rb/2/pi*1e-6, gamma32cs/2/pi*1e-6],
["tau", taurb*1e9, taucs*1e9]]
print tabulate(table, headers="firstrow")
###############################################################################
# We make continous plots for the formulas
tdelay_cont = np.linspace(0, tdelay_exp[-1]*1.05, 500)
eta_simple_cont = simple_formula(tdelay_cont, 1.0, gamma32rb,
sigmarb, kmrb85)*0.22
omega87 = 2*pi*28.8254e6
omega97 = omega87 + 2*pi*22.954e6
omega107 = omega97 + 2*pi*15.9384e6
# We fit the hyperfine formula to the experimental data.
f = get_model(gamma32rb, sigmarb, kmrb85, omega87, omega97, omega107)
p0 = [1.0, 1.0/3, 1.0/3, 1.0/3, 0.0, 0.0, 0.0, 0.0]
res = curve_fit(f, tdelay_exp, eta_exp, p0=p0)
p0 = res[0]
amp, A, B, C, D, phib, phic, phid = p0
amp01 = amp*abs(A + B*np.exp(1j*phib) + C*np.exp(1j*phic)+D*np.exp(1j*phid))**2
eta_exp_cont = f(tdelay_cont, *p0)/amp01
# We fit the hyperfine formula to the hyperfine model.
res = curve_fit(f, tdelay_hyp, eta_hyp, p0=p0)
p0 = res[0]
amp, A, B, C, D, phib, phic, phid = p0
print A, B, C, D
amp02 = amp*abs(A + B*np.exp(1j*phib) + C*np.exp(1j*phic)+D*np.exp(1j*phid))**2
eta_hyp_cont = f(tdelay_cont, *p0)/amp02
###############################################################################
# We find the 1/e lifetime with pumping.
tau_hfs = 0
eta_pump = eta_simple_cont/eta_simple_cont[0]
tau_fs = 0
for i in range(len(eta_pump)):
if eta_pump[i] < np.exp(-1.0):
tau_fs = tdelay_cont[i]
break
print "The 1/e lifetime with pumping is", tau_fs*1e9, "ns."
#######################################
# We find the 1/e lifetime without pumping.
tau_hfs = 0
eta_no_pump = eta_hyp_cont
for i in range(len(eta_pump)):
if eta_no_pump[i] < np.exp(-1.0):
tau_fs = tdelay_cont[i]
break
print "The 1/e lifetime without pumping is", tau_fs*1e9, "ns."
# We make a plot including the ab-initio formulas:
###############################################################################
plt.title(r"$^{87}\mathrm{Rb \ Memory \ Lifetime}$", fontsize=15)
plt.plot(tdelay_hyp*1e9, eta_hyp/amp02, "rx-",
label=r"$\mathrm{Hyperfine \ Theory}$", ms=5)
plt.plot(tdelay_cont*1e9, eta_hyp_cont, "b-",
label=r"$\eta_{\mathrm{hfs}} \ \mathrm{fit}$")
plt.plot(tdelay_cont*1e9, eta_simple_cont/eta_simple_cont[0], "g-",
label=r"$\mathrm{Simple \ Theory}$")
plt.xlim([0, tdelay_cont[-1]*1e9])
plt.ylim([0, 1.02])
plt.xlabel(r"$\tau \ \mathrm{[ns]}$", fontsize=15)
plt.ylabel(r"$\eta_\mathrm{N}$", fontsize=15)
plt.legend(loc=1, fontsize=12)
plt.savefig("doppler_dephasing_rb1.png", bbox_inches="tight")
plt.savefig("doppler_dephasing_rb1.pdf", bbox_inches="tight")
#############################################
# plt.plot(tdelay_exp*1e9, eta_exp/amp01, "x", color=(1, 0.5, 0),
# label=r"$\mathrm{Experiment}$", ms=5)
# plt.plot(tdelay_cont*1e9, eta_exp_cont, "-", color=(1, 0.5, 0),
# label=r"$\eta_{\mathrm{hfs}} \ \mathrm{fit}$")
# plt.legend(loc=1, fontsize=12)
plt.savefig("doppler_dephasing_rb2.png", bbox_inches="tight")
plt.savefig("doppler_dephasing_rb2.pdf", bbox_inches="tight")
plt.close("all")
#############################################
plt.title(r"$^{87}\mathrm{Rb \ Memory \ Lifetime}$", fontsize=15)
plt.plot(tdelay_cont*1e9, eta_hyp_cont, "b-",
label=r"$\mathrm{Theory \ (without \ pumping)}$")
plt.plot(tdelay_cont*1e9, eta_simple_cont/eta_simple_cont[0], "g-",
label=r"$\mathrm{Theory \ (with \ pumping)}$")
# plt.xlim([0, tdelay_cont[-1]*1e9])
plt.ylim([0, 1.02])
plt.xlabel(r"$\tau \ \mathrm{[ns]}$", fontsize=15)
plt.ylabel(r"$\eta_\mathrm{N}$", fontsize=15)
plt.legend(loc=1, fontsize=12)
plt.savefig("doppler_dephasing_rb3.png", bbox_inches="tight")
plt.savefig("doppler_dephasing_rb3.pdf", bbox_inches="tight")
plt.savefig("doppler_dephasing_rb3.svg", bbox_inches="tight")
# We save all the data.
###############################################################################
continous = np.asarray([tdelay_cont, eta_hyp_cont,
eta_simple_cont/eta_simple_cont[0]]).T
continous = pd.DataFrame(continous)
continous.to_csv("eta_cont_rb.csv",
header=["tdelay_cont", "etan_hyp", "etan_sim"])
maxwell_bloch = np.asarray([tdelay_hyp, eta_hyp/amp02]).T
maxwell_bloch = pd.DataFrame(maxwell_bloch)
maxwell_bloch.to_csv("eta_the_rb.csv",
header=["tdelay_exp", "etan_hyp"])
| gpl-3.0 |
doubaoatthu/UWRoutingSystem | server/dt.py | 1 | 2276 | from sklearn import tree
from sklearn.externals.six import StringIO
from IPython.display import Image
from sklearn.neighbors import KNeighborsClassifier
import os
import pydot
import numpy
import math
import random
preference = []
label = []
with open("../data/survey.txt") as f:
contents = f.readlines()
for content in contents:
content = content[1:-2]
content = content.replace("\"","")
labelarr = content.split(",")
labelarr = labelarr[1:]
intlabelarr = map(int,labelarr)
preference.append(intlabelarr)
with open("../data/path.txt") as f:
contents = f.readlines()
for content in contents:
fstring = content.split("feature")
if(len(fstring) > 1):
features = fstring[1]
features = features[3:-4]
featarr = features.split(",")
for i in range(0,len(featarr)):
if(featarr[i] == "true"):
featarr[i] = "1"
if(featarr[i] == "false"):
featarr[i] = "0"
label.append(map(int,map(float,featarr)))
for i in range(0,len(label)):
if(label[i][7] < 1500):
label[i][7] = 1
elif(label[i][7] < 2500):
label[i][7] = 2
else:
label[i][7] = 3
# print(preference)
# print(label)
# print(len(label))
x = numpy.array(label)
y = x.T
fname = ["sunny","cloudy","rainy/snowy","tired","coffee","bathroom","avoid crowd","curiousity","printer","campus event","hurry","fresh air","meet friend"]
def drawDecisionTree(classIndex):
clf = tree.DecisionTreeClassifier()
clf = clf.fit(preference,y[classIndex])
# dot_data = StringIO()
# # change it: class_names = cnames[classIndex]
# tree.export_graphviz(clf,out_file=dot_data,feature_names= fname,filled=True, rounded=True,special_characters=True)
# graph = pydot.graph_from_dot_data(dot_data.getvalue())
# filename = "decisionTree_" + str(classIndex) + ".pdf"
# graph.write_pdf(filename)
return clf
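# Illustrative call (added for clarity; assumes `preference` and `label` were
# filled from the survey/path files above):
#   clf = drawDecisionTree(1)
#   clf.predict([preference[0]])  # predicted value of path feature 1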
toparse = []
for i in range(12):
toparse.append(random.randint(1, 2))
trees = []
result = []
for i in range (1,9):
result.append(drawDecisionTree(i).predict(toparse)[0])
ix = 0
choice = 0
max = 1000
for l in label:
dis = 0
for i in range(0,len(l)-1):
if(l[i] - result[i] == 0):
dis = dis - 2
dis = dis + math.sqrt((l[i] - result[i])*(l[i] - result[i]))
if(dis < max):
max = dis
choice = ix
ix += 1
print(toparse)
print(choice)
| mit |
rachel3834/mulens_modeler | trunk/scripts/gen_mag_error_relation.py | 1 | 2082 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 20 00:28:47 2016
@author: robouser
"""
import mulens_class
from astropy.time import Time, TimeDelta
import numpy as np
from astropy import constants
import matplotlib.pyplot as plt
def generate_mag_err_relations():
"""Function to generate datafiles of the magnitude error relations"""
event = mulens_class.MicrolensingEvent()
event.u_min = 0.0
event.u_offset = 0.001
event.t_E = TimeDelta((1.0 * 24.0 * 3600.0),format='sec')
event.phi = ( 0.0 * np.pi ) / 180.0
event.rho = 0.001
event.M_L = constants.M_sun * 0.3
event.D_L = constants.pc * 3000.0
event.D_S = constants.pc * 8000.0
event.RA = '17:57:34.0'
event.Dec = '-29:13:15.0'
event.t_o = Time('2015-06-15T15:00:00', format='isot', scale='utc')
event.get_earth_perihelion()
exp_time = 200.0
output = open('/home/robouser/mag_err_relation.data','w')
output.write('# Column 1: magnitude\n')
output.write('# Column 2: magnitude uncertainty (1m telescope on Earth)\n')
output.write('# Column 3: magnitude uncertainty (Swift)\n')
mags = []
earth_merr = []
swift_merr = []
for mag in np.arange(12,18.0,0.01):
event.mag_base = mag
mags.append( mag )
earth_merr.append( event.sim_mag_error( exp_time, mag, precision_model='1m') )
swift_merr.append( event.sim_mag_error( exp_time, mag, precision_model='swift') )
output.write( str(mag) + ' ' + str(earth_merr[-1]) + ' ' + str(swift_merr[-1]) + '\n' )
output.close()
mags = np.array(mags)
earth_merr = np.array( earth_merr )
swift_merr = np.array( swift_merr )
fig = plt.figure(1,(12,12))
plt.plot( mags, earth_merr, 'r.', label='Earth 1m')
plt.plot( mags, swift_merr, 'b+', label='Swift')
plt.xlabel( 'Mag' )
plt.ylabel( 'Mag uncertainty' )
plt.yscale('log')
plt.legend(loc='best',frameon=False)
plt.grid()
plt.savefig('/home/robouser/mag_err_relation.png')
if __name__ == '__main__':
generate_mag_err_relations()
| gpl-2.0 |
joshbohde/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 2 | 7884 | import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_almost_equal, assert_array_almost_equal, \
assert_equal, assert_array_equal
from sklearn import datasets
from sklearn.metrics import mean_square_error
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
np.random.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
np.random.seed(0)
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
"""Ridge regression convergence test using score
TODO: for this test to be robust, we should use a dataset instead
of np.random.
"""
alpha = 1.0
# With more samples than features
n_samples, n_features = 6, 5
y = np.random.randn(n_samples)
X = np.random.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert ridge.score(X, y) > 0.5
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert ridge.score(X, y) > 0.5
# With more features than samples
n_samples, n_features = 5, 10
y = np.random.randn(n_samples)
X = np.random.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha)
ridge.fit(X, y)
assert ridge.score(X, y) > .9
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert ridge.score(X, y) > 0.9
def test_toy_ridge_object():
"""Test BayesianRegression ridge classifier
TODO: test also n_samples > n_features
"""
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y,Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
"""On alpha=0., Ridge and OLS yield the same solution."""
# we need more samples than features
n_samples, n_features = 5, 4
np.random.seed(0)
y = np.random.randn(n_samples)
X = np.random.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
    ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
    ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
K, v, Q = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(v, Q, y_diabetes, 1.0)
values, c = ridge_gcv._values(K, v, Q, y_diabetes, 1.0)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
best_alpha = ridge_gcv.best_alpha
ret.append(best_alpha)
# check that we get same best alpha with custom loss_func
ridge_gcv2 = _RidgeGCV(fit_intercept=False, loss_func=mean_square_error)
ridge_gcv2.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.best_alpha, best_alpha)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.best_alpha, best_alpha)
# simulate several responses
Y = np.vstack((y_diabetes,y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred,y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes,y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred,y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert np.mean(y_iris == y_pred) >= 0.8
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert np.mean(y_iris == y_pred) >= 0.8
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert score >= score2
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
        if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
| bsd-3-clause |
quheng/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
    # we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
tkarna/cofs | test/pressure_grad/test_int_pg_mes.py | 1 | 7608 | """
Unit tests for computing the internal pressure gradient
Runs MES convergence tests against a non-trivial analytical solution in a
deformed geometry.
P1DGxP2 space yields 1st order convergence. For second order convergence both
the scalar field and its gradient must be in P2DGxP2 space.
"""
from thetis import *
from thetis.momentum_eq import InternalPressureGradientCalculator
from scipy import stats
import pytest
def compute_l2_error(refinement=1, quadratic=False, no_exports=True):
"""
Computes pressure gradient in a setting where bathymetry, mesh surface
elevation, and pressure are analytical, non-trivial functions.
"""
print_output(' ---- running refinement {:}'.format(refinement))
# create mesh
rho_0 = 1000.0
physical_constants['rho0'] = rho_0
delta_x = 120e3/refinement
lx = 360e3
ly = 360e3
nx = int(lx/delta_x)
ny = int(ly/delta_x)
mesh2d = RectangleMesh(nx, ny, lx, ly)
layers = 3*refinement
# bathymetry
P1_2d = get_functionspace(mesh2d, 'CG', 1)
bathymetry_2d = Function(P1_2d, name='Bathymetry')
xy = SpatialCoordinate(mesh2d)
depth = 3600.
bath_expr = 0.5*(depth + depth)*(1 - 0.6*tanh(4*(xy[1]-ly/2)/ly)*sin(1.5*xy[0]/ly+0.2))
bathymetry_2d.project(bath_expr)
mesh = extrude_mesh_sigma(mesh2d, layers, bathymetry_2d)
bnd_len = compute_boundary_length(mesh2d)
mesh2d.boundary_len = bnd_len
mesh.boundary_len = bnd_len
# make function spaces and fields
p1 = get_functionspace(mesh, 'CG', 1)
if quadratic:
        # NOTE for 2nd order convergence both the scalar and grad must be p2
fs_pg = get_functionspace(mesh, 'DG', 2, 'CG', 2, vector=True, dim=2)
fs_scalar = get_functionspace(mesh, 'DG', 2, vfamily='CG', vdegree=2)
else:
# the default function spaces in Thetis
fs_pg = get_functionspace(mesh, 'DG', 1, 'CG', 2, vector=True, dim=2)
fs_scalar = get_functionspace(mesh, 'DG', 1, vfamily='CG', vdegree=2)
density_3d = Function(fs_scalar, name='density')
baroc_head_3d = Function(fs_scalar, name='baroclinic head')
int_pg_3d = Function(fs_pg, name='pressure gradient')
elev_3d = Function(p1, name='elevation')
    bathymetry_3d = Function(p1, name='bathymetry')
ExpandFunctionTo3d(bathymetry_2d, bathymetry_3d).solve()
# analytic expressions
xyz = SpatialCoordinate(mesh)
elev_expr = 2000.0*sin(0.3 + 1.5*xyz[1]/ly)*cos(2*xyz[0]/lx)
density_expr = sin((xyz[1] - 0.3)/lx)*cos(2*xyz[2]/depth)*cos(2*xyz[0]/lx)
baroc_head_expr = -depth*sin((2*xyz[2] - 4000.0*sin((0.3*ly + 1.5*xyz[1])/ly)*cos(2*xyz[0]/lx))/depth)*sin((xyz[1] - 0.3)/lx)*cos(2*xyz[0]/lx)/2
baroc_head_expr_dx = (depth*sin((2*xyz[2] - 4000.0*sin((0.3*ly + 1.5*xyz[1])/ly)*cos(2*xyz[0]/lx))/depth) - 4000.0*sin((0.3*ly + 1.5*xyz[1])/ly)*cos((2*xyz[2] - 4000.0*sin((0.3*ly + 1.5*xyz[1])/ly)*cos(2*xyz[0]/lx))/depth)*cos(2*xyz[0]/lx))*sin(2*xyz[0]/lx)*sin((xyz[1] - 0.3)/lx)/lx
baroc_head_expr_dy = (-depth*ly*sin((2*xyz[2] - 4000.0*sin((0.3*ly + 1.5*xyz[1])/ly)*cos(2*xyz[0]/lx))/depth)*cos((xyz[1] - 0.3)/lx) + 6000.0*lx*sin((xyz[1] - 0.3)/lx)*cos((2*xyz[2] - 4000.0*sin((0.3*ly + 1.5*xyz[1])/ly)*cos(2*xyz[0]/lx))/depth)*cos(2*xyz[0]/lx)*cos((0.3*ly + 1.5*xyz[1])/ly))*cos(2*xyz[0]/lx)/(2*lx*ly)
# deform mesh by elevation
elev_3d.project(elev_expr)
z_ref = mesh.coordinates.dat.data[:, 2]
bath = bathymetry_3d.dat.data[:]
eta = elev_3d.dat.data[:]
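    # stretch the water column linearly so that z in [-bath, 0] maps to [-bath, eta]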
new_z = eta*(z_ref + bath)/bath + z_ref
mesh.coordinates.dat.data[:, 2] = new_z
if not no_exports:
out_density = File('density.pvd')
out_bhead = File('baroc_head.pvd')
out_pg = File('int_pg.pvd')
# project initial scalar
density_3d.project(density_expr)
baroc_head_3d.project(baroc_head_expr)
# compute int_pg
fields = FieldDict()
fields.baroc_head_3d = baroc_head_3d
fields.int_pg_3d = int_pg_3d
fields.bathymetry_3d = bathymetry_3d
options = None
bnd_functions = {}
int_pg_solver = InternalPressureGradientCalculator(
fields,
options,
bnd_functions,
solver_parameters=None)
int_pg_solver.solve()
if not no_exports:
out_density.write(density_3d)
out_bhead.write(baroc_head_3d)
out_pg.write(int_pg_3d)
g_grav = physical_constants['g_grav']
ana_sol_expr = g_grav*as_vector((
baroc_head_expr_dx,
baroc_head_expr_dy,))
volume = comp_volume_3d(mesh)
l2_err = errornorm(ana_sol_expr, int_pg_3d, degree_rise=2)/np.sqrt(volume)
print_output('L2 error {:}'.format(l2_err))
if not no_exports:
out_density.write(density_3d)
out_bhead.write(baroc_head_3d)
out_pg.write(int_pg_3d.project(ana_sol_expr))
return l2_err
def run_convergence(ref_list, save_plot=False, **options):
"""Runs test for a list of refinements and computes error convergence rate"""
l2_err = []
for r in ref_list:
l2_err.append(compute_l2_error(r, **options))
x_log = np.log10(np.array(ref_list, dtype=float)**-1)
y_log = np.log10(np.array(l2_err))
setup_name = 'intpg'
quadratic = options.get('quadratic', False)
order = 2 if quadratic else 1
def check_convergence(x_log, y_log, expected_slope, field_str, save_plot):
slope_rtol = 0.2
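        # the slope of the log10(error) vs log10(resolution) fit is the observed convergence rate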
slope, intercept, r_value, p_value, std_err = stats.linregress(x_log, y_log)
if save_plot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
# plot points
ax.plot(x_log, y_log, 'k.')
x_min = x_log.min()
x_max = x_log.max()
offset = 0.05*(x_max - x_min)
n = 50
xx = np.linspace(x_min - offset, x_max + offset, n)
yy = intercept + slope*xx
# plot line
ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k')
ax.text(xx[int(2*n/3)], yy[int(2*n/3)], '{:4.2f}'.format(slope),
verticalalignment='top',
horizontalalignment='left')
ax.set_xlabel('log10(dx)')
ax.set_ylabel('log10(L2 error)')
ax.set_title(field_str)
ref_str = 'ref-' + '-'.join([str(r) for r in ref_list])
order_str = 'o{:}'.format(order)
imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, order_str])
imgfile += '.png'
img_dir = create_directory('plots')
imgfile = os.path.join(img_dir, imgfile)
print_output('saving figure {:}'.format(imgfile))
plt.savefig(imgfile, dpi=200, bbox_inches='tight')
if expected_slope is not None:
err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope)
assert abs(slope - expected_slope)/expected_slope < slope_rtol, err_msg
print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope))
else:
print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope))
return slope
check_convergence(x_log, y_log, order, 'intpg', save_plot)
@pytest.mark.parametrize(('quadratic'), [True, False], ids=['quadratic', 'linear'])
def test_int_pg(quadratic):
run_convergence([1, 2, 3], quadratic=quadratic, save_plot=False, no_exports=True)
if __name__ == '__main__':
# compute_l2_error(refinement=3, quadratic=False, no_exports=False)
run_convergence([1, 2, 3, 4, 6, 8], quadratic=False, save_plot=True, no_exports=True)
| mit |
michaelpacer/scikit-image | doc/examples/plot_multiblock_local_binary_pattern.py | 22 | 2498 | """
===========================================================
Multi-Block Local Binary Pattern for texture classification
===========================================================
This example shows how to compute multi-block local binary pattern (MB-LBP)
features as well as how to visualize them.
The features are calculated similarly to local binary patterns (LBPs), except
that summed blocks are used instead of individual pixel values.
MB-LBP is an extension of LBP that can be computed on multiple scales in
constant time using the integral image. 9 equally-sized rectangles are used to
compute a feature. For each rectangle, the sum of the pixel intensities is
computed. Comparisons of these sums to that of the central rectangle determine
the feature, similarly to LBP (See `LBP <plot_local_binary_pattern.html>`_).
First, we generate an image to illustrate the functioning of MB-LBP: consider
a (9, 9) rectangle and divide it into (3, 3) blocks, upon which we then apply
MB-LBP.
"""
from __future__ import print_function
from skimage.feature import multiblock_lbp
import numpy as np
from numpy.testing import assert_equal
from skimage.transform import integral_image
# Create test matrix where first and fifth rectangles starting
# from top left clockwise have greater value than the central one.
test_img = np.zeros((9, 9), dtype='uint8')
test_img[3:6, 3:6] = 1
test_img[:3, :3] = 50
test_img[6:, 6:] = 50
# First and fifth bits should be filled. This correct value will
# be compared to the computed one.
correct_answer = 0b10001000
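# block sums for MB-LBP are taken from the integral image, so they can be computed in constant time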
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3)
assert_equal(correct_answer, lbp_code)
"""
Now let's apply the operator to a real image and see how the
visualization works.
"""
from skimage import data
from matplotlib import pyplot as plt
from skimage.feature import draw_multiblock_lbp
test_img = data.coins()
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 90, 90)
img = draw_multiblock_lbp(test_img, 0, 0, 90, 90,
lbp_code=lbp_code, alpha=0.5)
plt.imshow(img, interpolation='nearest')
plt.show()
"""
.. image:: PLOT2RST.current_figure
On the above plot we see the result of computing a MB-LBP and visualization of
the computed feature. The rectangles that have less intensities' sum than the
central rectangle are marked in cyan. The ones that have higher intensity
values are marked in white. The central rectangle is left untouched.
"""
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexes/period/test_ops.py | 2 | 13093 | import numpy as np
import pytest
import pandas as pd
from pandas import DatetimeIndex, Index, NaT, PeriodIndex, Series
from pandas.core.arrays import PeriodArray
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
class TestPeriodIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
        mask = lambda x: isinstance(x, (DatetimeIndex, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, PeriodIndex)
self.check_ops_properties(PeriodArray._field_ops, f)
self.check_ops_properties(PeriodArray._object_ops, f)
self.check_ops_properties(PeriodArray._bool_ops, f)
def test_resolution(self):
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.period_range(start="2013-04-01", periods=30, freq=freq)
assert idx.resolution == expected
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range("2011-01-01 09:00", freq="H", periods=10)
        # create repeated values; the n-th element is repeated n+1 times
idx = PeriodIndex(np.repeat(idx._values, range(1, len(idx) + 1)), freq="H")
exp_idx = PeriodIndex(
[
"2011-01-01 18:00",
"2011-01-01 17:00",
"2011-01-01 16:00",
"2011-01-01 15:00",
"2011-01-01 14:00",
"2011-01-01 13:00",
"2011-01-01 12:00",
"2011-01-01 11:00",
"2011-01-01 10:00",
"2011-01-01 09:00",
],
freq="H",
)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range("2011-01-01 09:00", freq="H", periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
NaT,
],
freq="H",
)
exp_idx = PeriodIndex(["2013-01-01 09:00", "2013-01-01 08:00"], freq="H")
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(["2013-01-01 09:00", "2013-01-01 08:00", NaT], freq="H")
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep="last")
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep="last")
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
assert index.freq == expected_index.freq
pidx = PeriodIndex(["2011", "2012", "2013"], name="pidx", freq="A")
# for compatibility check
iidx = Index([2011, 2012, 2013], name="idx")
for idx in [pidx, iidx]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, idx[::-1])
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(
["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A"
)
pexpected = PeriodIndex(
["2011", "2011", "2012", "2013", "2015"], name="pidx", freq="A"
)
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx")
iexpected = Index([2011, 2011, 2012, 2013, 2015], name="idx")
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(["2011", "2013", "NaT", "2011"], name="pidx", freq="D")
result = pidx.sort_values()
expected = PeriodIndex(["NaT", "2011", "2011", "2013"], name="pidx", freq="D")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(["2013", "2011", "2011", "NaT"], name="pidx", freq="D")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
def test_order(self):
for freq in ["D", "2D", "4D"]:
idx = PeriodIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq=freq, name="idx"
)
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq == freq
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
assert ordered.freq == freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq == freq
idx1 = PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
freq="D",
name="idx1",
)
exp1 = PeriodIndex(
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
freq="D",
name="idx1",
)
idx2 = PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
freq="D",
name="idx2",
)
exp2 = PeriodIndex(
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
freq="D",
name="idx2",
)
idx3 = PeriodIndex(
[NaT, "2011-01-03", "2011-01-05", "2011-01-02", NaT], freq="D", name="idx3"
)
exp3 = PeriodIndex(
[NaT, NaT, "2011-01-02", "2011-01-03", "2011-01-05"], freq="D", name="idx3"
)
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq == "D"
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq == "D"
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq == "D"
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq == "D"
def test_shift(self):
# This is tested in test_arithmetic
pass
def test_nat(self):
assert pd.PeriodIndex._na_value is NaT
assert pd.PeriodIndex([], freq="M")._na_value is NaT
idx = pd.PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = pd.PeriodIndex(["2011-01-01", "NaT"], freq="D")
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
@pytest.mark.parametrize("freq", ["D", "M"])
def test_equals(self, freq):
# GH#13107
idx = pd.PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq)
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="H")
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
        # same values, different freq
idx3 = pd.PeriodIndex._simple_new(
idx._values._simple_new(idx._values.asi8, freq="H")
)
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
def test_freq_setter_deprecated(self):
# GH 20678
idx = pd.period_range("2018Q1", periods=4, freq="Q")
# no warning for getter
with tm.assert_produces_warning(None):
idx.freq
# warning for setter
with tm.assert_produces_warning(FutureWarning):
idx.freq = pd.offsets.Day()
| apache-2.0 |
joernhees/scikit-learn | sklearn/tests/test_metaestimators.py | 52 | 4990 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
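        # wrap the method in a property that raises AttributeError when its name
        # matches obj.hidden_method, so the method appears to be missing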
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
Caranarq/01_Dmine | 01_Agua/P0106/P0106.py | 1 | 2761 | # -*- coding: utf-8 -*-
"""
Started on wed, thu 17th, 2018
@author: carlos.arana
"""
# Libraries used
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from VarInt.VarInt import VarInt
from classes.Meta import Meta
from Compilador.Compilador import compilar
"""
The local libraries used in the lines above are available at the following locations:
SCRIPT: | AVAILABLE AT:
------ | ------------------------------------------------------------------------------------
VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
Meta | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Classes
Compilador | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Compilador
"""
# Parameter documentation --------------------------------------------------------------------------------------
# Parameter descriptions
M = Meta
M.ClaveParametro = 'P0106'
M.NombreParametro = 'Demanda Bioquimica de Oxigeno'
M.DescParam = 'Demanda Bioquimica de Oxigeno medida a 5 días'
M.UnidadesParam = 'mg/l'
M.TituloParametro = 'DBO5'  # Used to name the parameter column
M.PeriodoParam = '2017'
M.TipoInt = 1  # 1: Binary; 2: Multivariable; 3: Integral
# Handlings
M.ParDtype = 'float'
M.TipoVar = 'C'  # (Variable types: [C]ontinuous, [D]iscrete, [O]rdinal, [B]inary or [N]ominal)
M.array = []
M.TipoAgr = 'mean'
# Data mining process descriptions
M.nomarchivodataset = 'P0106'
M.extarchivodataset = 'xlsx'
M.ContenidoHojaDatos = 'Estaciones de monitoreo y datos de Demanda Bioquimica de Oxigeno a 5 días'
M.ClaveDataset = 'CONAGUA'
M.ActDatos = '2017'
M.Agregacion = 'Se promedió el valor de DBO para las estaciones de monitoreo que existen en los municipios ' \
'que componen cada ciudad del SUN'
# Descriptions generated from the parameter key
M.getmetafromds = 1
Meta.fillmeta(M)
# Parameter construction -----------------------------------------------------------------------------------------
# Load the initial dataset
dataset = pd.read_excel(M.DirFuente + '\\' + M.ArchivoDataset,
sheetname='DATOS', dtype={'CVE_MUN': 'str'})
dataset.set_index('CVE_MUN', inplace=True)
dataset = dataset.rename_axis('CVE_MUN')
dataset.head(2)
list(dataset)
# Generate the dataset for the parameter and the integrity variable
var1 = 'dbo'
par_dataset = dataset[var1]
par_dataset = par_dataset.to_frame(name = M.ClaveParametro)
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo=M.TipoInt)
# Compilation
compilar(M, dataset, par_dataset, variables_dataset)
| gpl-3.0 |
toobaz/pandas | pandas/tests/series/test_analytics.py | 1 | 56686 | from itertools import product
import operator
import numpy as np
from numpy import nan
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Series,
date_range,
isna,
notna,
)
from pandas.api.types import is_scalar
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
class TestSeriesAnalytics:
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name="int_data")
result = s.describe()
expected = Series(
[5, 2, s.std(), 0, 1, 2, 3, 4],
name="int_data",
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
s = Series([True, True, False, False, False], name="bool_data")
result = s.describe()
expected = Series(
[5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"]
)
tm.assert_series_equal(result, expected)
s = Series(["a", "a", "b", "c", "d"], name="str_data")
result = s.describe()
expected = Series(
[5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"]
)
tm.assert_series_equal(result, expected)
def test_describe_empty_object(self):
# https://github.com/pandas-dev/pandas/issues/27183
s = pd.Series([None, None], dtype=object)
result = s.describe()
expected = pd.Series(
[0, 0, np.nan, np.nan],
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_series_equal(result, expected)
result = s[:0].describe()
tm.assert_series_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2])
assert np.isnan(result.iloc[3])
def test_describe_with_tz(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
name = str(tz_naive_fixture)
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s = Series(date_range(start, end, tz=tz), name=name)
result = s.describe()
expected = Series(
[
5,
5,
s.value_counts().index[0],
1,
start.tz_localize(tz),
end.tz_localize(tz),
],
name=name,
index=["count", "unique", "top", "freq", "first", "last"],
)
tm.assert_series_equal(result, expected)
def test_argsort(self, datetime_series):
self._check_accum_op("argsort", datetime_series, check_dtype=False)
argsorted = datetime_series.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp("201301{i:02d}".format(i=i)) for i in range(1, 6)])
assert s.dtype == "datetime64[ns]"
shifted = s.shift(-1)
assert shifted.dtype == "datetime64[ns]"
assert isna(shifted[4])
result = s.argsort()
expected = Series(range(5), dtype="int64")
assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(list(range(4)) + [-1], dtype="int64")
assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind="mergesort")
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind="mergesort")
qexpected = np.argsort(s.values, kind="quicksort")
tm.assert_series_equal(mindexer, Series(mexpected), check_dtype=False)
tm.assert_series_equal(qindexer, Series(qexpected), check_dtype=False)
msg = (
r"ndarray Expected type <class 'numpy\.ndarray'>,"
r" found <class 'pandas\.core\.series\.Series'> instead"
)
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
def test_cumsum(self, datetime_series):
self._check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
self._check_accum_op("cumprod", datetime_series)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
np.minimum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummax().values,
np.maximum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(
pd.to_datetime(["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"])
)
expected = pd.Series(
pd.to_datetime(["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
)
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(
pd.to_datetime(["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"])
)
expected = pd.Series(
pd.to_datetime(["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
)
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_npdiff(self):
pytest.skip("skipping due to Series no longer being an ndarray")
# no longer works as the return type of np.diff is now nd.array
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_accum_op(self, name, datetime_series_, check_dtype=True):
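        # compare the Series accumulation op against the equivalent numpy function,
        # both with and without missing values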
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(datetime_series_).values,
func(np.array(datetime_series_)),
check_dtype=check_dtype,
)
# with missing values
ts = datetime_series_.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
def test_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7], index=list("abcde"), name="foo")
expected = Series(s.values.compress(cond), index=list("ac"), name="foo")
with tm.assert_produces_warning(FutureWarning):
result = s.compress(cond)
tm.assert_series_equal(result, expected)
def test_numpy_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7], index=list("abcde"), name="foo")
expected = Series(s.values.compress(cond), index=list("ac"), name="foo")
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(np.compress(cond, s), expected)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.compress(cond, s, axis=1)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.compress(cond, s, out=s)
def test_round(self, datetime_series):
datetime_series.index.name = "index_name"
result = datetime_series.round(2)
expected = Series(
np.round(datetime_series.values, 2), index=datetime_series.index, name="ts"
)
assert_series_equal(result, expected)
assert result.name == datetime_series.name
def test_numpy_round(self):
# See gh-12600
s = Series([1.53, 1.36, 0.06])
out = np.round(s, decimals=0)
expected = Series([2.0, 1.0, 0.0])
assert_series_equal(out, expected)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.round(s, decimals=0, out=s)
def test_numpy_round_nan(self):
# See gh-14197
s = Series([1.53, np.nan, 0.06])
with tm.assert_produces_warning(None):
result = s.round()
expected = Series([2.0, np.nan, 0.0])
assert_series_equal(result, expected)
def test_built_in_round(self):
s = Series([1.123, 2.123, 3.123], index=range(3))
result = round(s)
expected_rounded0 = Series([1.0, 2.0, 3.0], index=range(3))
tm.assert_series_equal(result, expected_rounded0)
decimals = 2
expected_rounded = Series([1.12, 2.12, 3.12], index=range(3))
result = round(s, decimals)
tm.assert_series_equal(result, expected_rounded)
def test_prod_numpy16_bug(self):
s = Series([1.0, 1.0, 1.0], index=range(3))
result = s.prod()
assert not isinstance(result, Series)
@td.skip_if_no_scipy
def test_corr(self, datetime_series):
import scipy.stats as stats
# full overlap
tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)
# partial overlap
tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]), 1)
assert isna(datetime_series[:15].corr(datetime_series[5:], min_periods=12))
ts1 = datetime_series[:15].reindex(datetime_series.index)
ts2 = datetime_series[5:].reindex(datetime_series.index)
assert isna(ts1.corr(ts2, min_periods=12))
# No overlap
assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))
# all NA
cp = datetime_series[:10].copy()
cp[:] = np.nan
assert isna(cp.corr(cp))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
tm.assert_almost_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_rank(self):
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method="kendall")
expected = stats.kendalltau(A, B)[0]
tm.assert_almost_equal(result, expected)
result = A.corr(B, method="spearman")
expected = stats.spearmanr(A, B)[0]
tm.assert_almost_equal(result, expected)
# results from R
A = Series(
[
-0.89926396,
0.94209606,
-1.03289164,
-0.95445587,
0.76910310,
-0.06430576,
-2.09704447,
0.40660407,
-0.89926396,
0.94209606,
]
)
B = Series(
[
-1.01270225,
-0.62210117,
-1.56895827,
0.59592943,
-0.01680292,
1.17258718,
-1.06009347,
-0.10222060,
-0.89076239,
0.89372375,
]
)
kexp = 0.4319297
sexp = 0.5853767
tm.assert_almost_equal(A.corr(B, method="kendall"), kexp)
tm.assert_almost_equal(A.corr(B, method="spearman"), sexp)
def test_corr_invalid_method(self):
# GH PR #22298
s1 = pd.Series(np.random.randn(10))
s2 = pd.Series(np.random.randn(10))
msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
with pytest.raises(ValueError, match=msg):
s1.corr(s2, method="____")
def test_corr_callable_method(self, datetime_series):
# simple correlation example
# returns 1 if exact equality, 0 otherwise
my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0
# simple example
s1 = Series([1, 2, 3, 4, 5])
s2 = Series([5, 4, 3, 2, 1])
expected = 0
tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)
# full overlap
tm.assert_almost_equal(
datetime_series.corr(datetime_series, method=my_corr), 1.0
)
# partial overlap
tm.assert_almost_equal(
datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0
)
# No overlap
assert np.isnan(
datetime_series[::2].corr(datetime_series[1::2], method=my_corr)
)
# dataframe example
df = pd.DataFrame([s1, s2])
expected = pd.DataFrame([{0: 1.0, 1: 0}, {0: 0, 1: 1.0}])
tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)
def test_cov(self, datetime_series):
# full overlap
tm.assert_almost_equal(
datetime_series.cov(datetime_series), datetime_series.std() ** 2
)
# partial overlap
tm.assert_almost_equal(
datetime_series[:15].cov(datetime_series[5:]),
datetime_series[5:15].std() ** 2,
)
# No overlap
assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))
# all NA
cp = datetime_series[:10].copy()
cp[:] = np.nan
assert isna(cp.cov(cp))
# min_periods
assert isna(datetime_series[:15].cov(datetime_series[5:], min_periods=12))
ts1 = datetime_series[:15].reindex(datetime_series.index)
ts2 = datetime_series[5:].reindex(datetime_series.index)
assert isna(ts1.cov(ts2, min_periods=12))
def test_count(self, datetime_series):
assert datetime_series.count() == len(datetime_series)
datetime_series[::2] = np.NaN
assert datetime_series.count() == np.isfinite(datetime_series).sum()
mi = MultiIndex.from_arrays([list("aabbcc"), [1, 2, 2, nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
right = Series([2, 3, 1], index=[1, 2, nan])
assert_series_equal(left, right)
ts.iloc[[0, 3, 5]] = nan
assert_series_equal(ts.count(level=1), right - 1)
def test_dot(self):
a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
b = DataFrame(
np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
        result = a.dot(b2)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
assert np.all(result == expected.values)
assert_almost_equal(a.dot(b["2"].values), expected["2"])
# Check series argument
assert_almost_equal(a.dot(b["1"]), expected["1"])
assert_almost_equal(a.dot(b2["1"]), expected["1"])
msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
# exception raised is of type Exception
with pytest.raises(Exception, match=msg):
a.dot(a.values[:3])
msg = "matrices are not aligned"
with pytest.raises(ValueError, match=msg):
a.dot(b.T)
def test_matmul(self):
# matmul test is for GH #10259
a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
b = DataFrame(
np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
).T
# Series @ DataFrame -> Series
result = operator.matmul(a, b)
expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])
assert_series_equal(result, expected)
# DataFrame @ Series -> Series
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
assert_series_equal(result, expected)
# Series @ Series -> scalar
result = operator.matmul(a, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# vector (1D np.array) @ Series (__rmatmul__)
result = operator.matmul(a.values, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# vector (1D list) @ Series (__rmatmul__)
result = operator.matmul(a.values.tolist(), a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# matrix (2D np.array) @ Series (__rmatmul__)
result = operator.matmul(b.T.values, a)
expected = np.dot(b.T.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# matrix (2D nested lists) @ Series (__rmatmul__)
result = operator.matmul(b.T.values.tolist(), a)
expected = np.dot(b.T.values, a.values)
assert_almost_equal(result, expected)
# mixed dtype DataFrame @ Series
a["p"] = int(a.p)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
assert_series_equal(result, expected)
# different dtypes DataFrame @ Series
a = a.astype(int)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
assert_series_equal(result, expected)
msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
# exception raised is of type Exception
with pytest.raises(Exception, match=msg):
a.dot(a.values[:3])
msg = "matrices are not aligned"
with pytest.raises(ValueError, match=msg):
a.dot(b.T)
def test_clip(self, datetime_series):
val = datetime_series.median()
with tm.assert_produces_warning(FutureWarning):
assert datetime_series.clip_lower(val).min() == val
with tm.assert_produces_warning(FutureWarning):
assert datetime_series.clip_upper(val).max() == val
assert datetime_series.clip(lower=val).min() == val
assert datetime_series.clip(upper=val).max() == val
result = datetime_series.clip(-0.5, 0.5)
expected = np.clip(datetime_series, -0.5, 0.5)
assert_series_equal(result, expected)
assert isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [
Series([np.nan, 1.0, 2.0, 3.0]),
Series([None, "a", "b", "c"]),
Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")),
]
for s in sers:
thresh = s[2]
with tm.assert_produces_warning(FutureWarning):
lower = s.clip_lower(thresh)
with tm.assert_produces_warning(FutureWarning):
upper = s.clip_upper(thresh)
assert lower[notna(lower)].min() == thresh
assert upper[notna(upper)].max() == thresh
assert list(isna(s)) == list(isna(lower))
assert list(isna(s)) == list(isna(upper))
def test_clip_with_na_args(self):
"""Should process np.nan argument as None """
# GH # 17276
s = Series([1, 2, 3])
assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))
# GH #19992
assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, np.nan]))
assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, np.nan, 1]))
def test_clip_against_series(self):
# GH #6966
s = Series([1.0, 1.0, 4.0])
threshold = Series([1.0, 2.0, 3.0])
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
def test_clip_against_list_like(self, inplace, upper):
# GH #15390
original = pd.Series([5, 6, 7])
result = original.clip(upper=upper, inplace=inplace)
expected = pd.Series([1, 2, 3])
if inplace:
result = original
tm.assert_series_equal(result, expected, check_exact=True)
def test_clip_with_datetimes(self):
# GH 11838
# naive and tz-aware datetimes
t = Timestamp("2015-12-01 09:30:30")
s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")])
result = s.clip(upper=t)
expected = Series(
[Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")]
)
assert_series_equal(result, expected)
t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern")
s = Series(
[
Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
Timestamp("2015-12-01 09:31:00", tz="US/Eastern"),
]
)
result = s.clip(upper=t)
expected = Series(
[
Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),
Timestamp("2015-12-01 09:30:30", tz="US/Eastern"),
]
)
assert_series_equal(result, expected)
def test_cummethods_bool(self):
# GH 6270
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_isin(self):
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
result = s.isin(["A", "C"])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
# GH: 16012
# This specific issue has to have a series over 1e6 in len, but the
# comparison array (in_list) must be large enough so that numpy doesn't
# do a manual masking trick that will avoid this issue altogether
s = Series(list("abcdefghijk" * 10 ** 5))
# If numpy doesn't do the manual comparison/mask, these
# unorderable mixed types are what cause the exception in numpy
in_list = [-1, "a", "b", "G", "Y", "Z", "E", "K", "E", "S", "I", "R", "R"] * 6
assert s.isin(in_list).sum() == 200000
def test_isin_with_string_scalar(self):
# GH4763
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
msg = (
r"only list-like objects are allowed to be passed to isin\(\),"
r" you passed a \[str\]"
)
with pytest.raises(TypeError, match=msg):
s.isin("a")
s = Series(["aaa", "b", "c"])
with pytest.raises(TypeError, match=msg):
s.isin("aaa")
def test_isin_with_i8(self):
# GH 5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range("jan-01-2013", "jan-05-2013"))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype("datetime64[D]"))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
result = s.isin(set(s[0:2]))
assert_series_equal(result, expected)
# timedelta64[ns]
s = Series(pd.to_timedelta(range(5), unit="d"))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
s = Series(["a", "b"])
expected = Series([False, False])
result = s.isin(empty)
tm.assert_series_equal(expected, result)
def test_ptp(self):
# GH21614
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert np.ptp(ser) == np.ptp(arr)
# GH11163
s = Series([3, 5, np.nan, -3, 10])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert s.ptp() == 13
assert pd.isna(s.ptp(skipna=False))
mi = pd.MultiIndex.from_product([["a", "b"], [1, 2, 3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
expected = pd.Series([6, 2], index=["a", "b"], dtype=np.float64)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(s.ptp(level=0), expected)
expected = pd.Series([np.nan, np.nan], index=["a", "b"])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(s.ptp(level=0, skipna=False), expected)
msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s.ptp(axis=1)
s = pd.Series(["a", "b", "c", "d", "e"])
msg = r"unsupported operand type\(s\) for -: 'str' and 'str'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s.ptp()
msg = r"Series\.ptp does not implement numeric_only\."
with pytest.raises(NotImplementedError, match=msg):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s.ptp(numeric_only=True)
def test_repeat(self):
s = Series(np.random.randn(3), index=["a", "b", "c"])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep), index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_numpy_repeat(self):
s = Series(np.arange(3), name="x")
expected = Series(s.values.repeat(2), name="x", index=s.index.values.repeat(2))
assert_series_equal(np.repeat(s, 2), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(s, 2, axis=0)
def test_searchsorted(self):
s = Series([1, 2, 3])
result = s.searchsorted(1, side="left")
assert is_scalar(result)
assert result == 0
result = s.searchsorted(1, side="right")
assert is_scalar(result)
assert result == 1
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
assert is_scalar(r)
assert r == 2
r = s.searchsorted([30])
e = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range("20120101", periods=10, freq="2D"))
v = pd.Timestamp("20120102")
r = s.searchsorted(v)
assert is_scalar(r)
assert r == 1
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range("20120101", periods=10, freq="2D"))
v = [pd.Timestamp("20120102"), pd.Timestamp("20120104")]
r = s.searchsorted(v)
e = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_monotonic
s = Series(np.arange(1000))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(np.arange(1000, 0, -1))
assert s.is_monotonic_decreasing is True
s = Series(pd.date_range("20130101", periods=10))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(list(reversed(s.tolist())))
assert s.is_monotonic is False
assert s.is_monotonic_decreasing is True
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level="A")
assert_series_equal(backwards, res)
res = s.sort_index(level=["A", "B"])
assert_series_equal(backwards, res)
res = s.sort_index(level="A", sort_remaining=False)
assert_series_equal(s, res)
res = s.sort_index(level=["A", "B"], sort_remaining=False)
assert_series_equal(s, res)
def test_apply_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
s = pd.Series(values, name="XX", index=list("abcdefg"))
result = s.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories are
# the same
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
exp = pd.Series(values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = s.apply(lambda x: "A")
exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
def test_shift_int(self, datetime_series):
ts = datetime_series.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_shift_categorical(self):
# GH 9416
s = pd.Series(["a", "b", "c", "d"], dtype="category")
assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
sp1 = s.shift(1)
assert_index_equal(s.index, sp1.index)
assert np.all(sp1.values.codes[:1] == -1)
assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
sn2 = s.shift(-2)
assert_index_equal(s.index, sn2.index)
assert np.all(sn2.values.codes[-2:] == -1)
assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
assert_index_equal(s.values.categories, sp1.values.categories)
assert_index_equal(s.values.categories, sn2.values.categories)
def test_unstack(self):
from numpy import nan
index = MultiIndex(
levels=[["bar", "foo"], ["one", "three", "two"]],
codes=[[1, 1, 0, 0], [0, 1, 0, 2]],
)
s = Series(np.arange(4.0), index=index)
unstacked = s.unstack()
expected = DataFrame(
[[2.0, nan, 3.0], [0.0, 1.0, nan]],
index=["bar", "foo"],
columns=["one", "three", "two"],
)
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
)
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(
levels=[["one", "two", "three"], [0, 1]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
)
expected = DataFrame({"bar": s.values}, index=exp_index).sort_index(level=0)
unstacked = s.unstack(0).sort_index()
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1, 2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102], columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays(
[
["cat", "cat", "cat", "dog", "dog"],
["a", "a", "b", "a", "b"],
[1, 2, 1, 1, np.nan],
]
)
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame(
[[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]], columns=["cat", "dog"]
)
tpls = [("a", 1), ("a", 2), ("b", nan), ("b", 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_value_counts_datetime(self):
# most dtypes are tested in test_base.py
values = [
pd.Timestamp("2011-01-01 09:00"),
pd.Timestamp("2011-01-01 10:00"),
pd.Timestamp("2011-01-01 11:00"),
pd.Timestamp("2011-01-01 09:00"),
pd.Timestamp("2011-01-01 09:00"),
pd.Timestamp("2011-01-01 11:00"),
]
exp_idx = pd.DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 11:00", "2011-01-01 10:00"]
)
exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
s = pd.Series(values, name="xxx")
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.DatetimeIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_datetime_tz(self):
values = [
pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
pd.Timestamp("2011-01-01 10:00", tz="US/Eastern"),
pd.Timestamp("2011-01-01 11:00", tz="US/Eastern"),
pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
pd.Timestamp("2011-01-01 09:00", tz="US/Eastern"),
pd.Timestamp("2011-01-01 11:00", tz="US/Eastern"),
]
exp_idx = pd.DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 11:00", "2011-01-01 10:00"],
tz="US/Eastern",
)
exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
s = pd.Series(values, name="xxx")
tm.assert_series_equal(s.value_counts(), exp)
idx = pd.DatetimeIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_period(self):
values = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-03", freq="M"),
]
exp_idx = pd.PeriodIndex(["2011-01", "2011-03", "2011-02"], freq="M")
exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
s = pd.Series(values, name="xxx")
tm.assert_series_equal(s.value_counts(), exp)
# check PeriodIndex outputs the same result
idx = pd.PeriodIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_ordered(self):
# most dtypes are tested in test_base.py
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=True)
exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
s = pd.Series(values, name="xxx")
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_not_ordered(self):
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3], ordered=False)
exp = pd.Series([3, 2, 1], index=exp_idx, name="xxx")
s = pd.Series(values, name="xxx")
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name="xxx")
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3.0, 2.0, 1]) / 6.0, index=exp_idx, name="xxx")
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@pytest.mark.parametrize("func", [np.any, np.all])
@pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())])
@td.skip_if_np_lt("1.15")
def test_validate_any_all_out_keepdims_raises(self, kwargs, func):
s = pd.Series([1, 2])
param = list(kwargs)[0]
name = func.__name__
msg = (
r"the '{arg}' parameter is not "
r"supported in the pandas "
r"implementation of {fname}\(\)"
).format(arg=param, fname=name)
with pytest.raises(ValueError, match=msg):
func(s, **kwargs)
@td.skip_if_np_lt("1.15")
def test_validate_sum_initial(self):
s = pd.Series([1, 2])
msg = (
r"the 'initial' parameter is not "
r"supported in the pandas "
r"implementation of sum\(\)"
)
with pytest.raises(ValueError, match=msg):
np.sum(s, initial=10)
def test_validate_median_initial(self):
s = pd.Series([1, 2])
msg = (
r"the 'overwrite_input' parameter is not "
r"supported in the pandas "
r"implementation of median\(\)"
)
with pytest.raises(ValueError, match=msg):
# It seems like np.median doesn't dispatch, so we use the
# method instead of the ufunc.
s.median(overwrite_input=True)
@td.skip_if_np_lt("1.15")
def test_validate_stat_keepdims(self):
s = pd.Series([1, 2])
msg = (
r"the 'keepdims' parameter is not "
r"supported in the pandas "
r"implementation of sum\(\)"
)
with pytest.raises(ValueError, match=msg):
np.sum(s, keepdims=True)
def test_compound_deprecated(self):
s = Series([0.1, 0.2, 0.3, 0.4])
with tm.assert_produces_warning(FutureWarning):
s.compound()
df = pd.DataFrame({"s": s})
with tm.assert_produces_warning(FutureWarning):
df.compound()
main_dtypes = [
"datetime",
"datetimetz",
"timedelta",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"uint8",
"uint16",
"uint32",
"uint64",
]
@pytest.fixture
def s_main_dtypes():
"""A DataFrame with many dtypes
* datetime
* datetimetz
* timedelta
* [u]int{8,16,32,64}
* float{32,64}
The columns are the name of the dtype.
"""
df = pd.DataFrame(
{
"datetime": pd.to_datetime(["2003", "2002", "2001", "2002", "2005"]),
"datetimetz": pd.to_datetime(
["2003", "2002", "2001", "2002", "2005"]
).tz_localize("US/Eastern"),
"timedelta": pd.to_timedelta(["3d", "2d", "1d", "2d", "5d"]),
}
)
for dtype in [
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"uint8",
"uint16",
"uint32",
"uint64",
]:
df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
return df
@pytest.fixture(params=main_dtypes)
def s_main_dtypes_split(request, s_main_dtypes):
"""Each series in s_main_dtypes."""
return s_main_dtypes[request.param]
def assert_check_nselect_boundary(vals, dtype, method):
# helper function for 'test_boundary_{dtype}' tests
s = Series(vals, dtype=dtype)
result = getattr(s, method)(3)
expected_idxr = [0, 1, 2] if method == "nsmallest" else [3, 2, 1]
expected = s.loc[expected_idxr]
tm.assert_series_equal(result, expected)
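# For example (added note), assert_check_nselect_boundary([10, 11, 98, 99], "int64", "nlargest")
# builds Series([10, 11, 98, 99], dtype="int64") and checks that s.nlargest(3)
# matches s.loc[[3, 2, 1]], i.e. the three largest values in descending order.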
class TestNLargestNSmallest:
@pytest.mark.parametrize(
"r",
[
Series([3.0, 2, 1, 2, "5"], dtype="object"),
Series([3.0, 2, 1, 2, 5], dtype="object"),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3.0, 2, 1, 2, 5], dtype="complex128"),
Series(list("abcde")),
Series(list("abcde"), dtype="category"),
],
)
def test_error(self, r):
dt = r.dtype
msg = "Cannot use method 'n(larg|small)est' with dtype {dt}".format(dt=dt)
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with pytest.raises(TypeError, match=msg):
method(arg)
def test_nsmallest_nlargest(self, s_main_dtypes_split):
# float, int, datetime64 (use i8), timedelta64 (same),
# object that are numbers, object that are strings
s = s_main_dtypes_split
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, keep="last"), s.iloc[[2, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.sort_values())
assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1), s.iloc[[4, 0, 1, 3, 2]])
def test_misc(self):
s = Series([3.0, np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
msg = 'keep must be either "first", "last"'
with pytest.raises(ValueError, match=msg):
s.nsmallest(keep="invalid")
with pytest.raises(ValueError, match=msg):
s.nlargest(keep="invalid")
# GH 15297
s = Series([1] * 5, index=[1, 2, 3, 4, 5])
expected_first = Series([1] * 3, index=[1, 2, 3])
expected_last = Series([1] * 3, index=[5, 4, 3])
result = s.nsmallest(3)
assert_series_equal(result, expected_first)
result = s.nsmallest(3, keep="last")
assert_series_equal(result, expected_last)
result = s.nlargest(3)
assert_series_equal(result, expected_first)
result = s.nlargest(3, keep="last")
assert_series_equal(result, expected_last)
@pytest.mark.parametrize("n", range(1, 5))
def test_n(self, n):
# GH 13412
s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
result = s.nlargest(n)
expected = s.sort_values(ascending=False).head(n)
assert_series_equal(result, expected)
result = s.nsmallest(n)
expected = s.sort_values().head(n)
assert_series_equal(result, expected)
def test_boundary_integer(self, nselect_method, any_int_dtype):
# GH 21426
dtype_info = np.iinfo(any_int_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val, min_val + 1, max_val - 1, max_val]
assert_check_nselect_boundary(vals, any_int_dtype, nselect_method)
def test_boundary_float(self, nselect_method, float_dtype):
# GH 21426
dtype_info = np.finfo(float_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
min_2nd, max_2nd = np.nextafter([min_val, max_val], 0, dtype=float_dtype)
vals = [min_val, min_2nd, max_2nd, max_val]
assert_check_nselect_boundary(vals, float_dtype, nselect_method)
@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
def test_boundary_datetimelike(self, nselect_method, dtype):
# GH 21426
# use int64 bounds and +1 to min_val since true minimum is NaT
# (include min_val/NaT at end to maintain same expected_idxr)
dtype_info = np.iinfo("int64")
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]
assert_check_nselect_boundary(vals, dtype, nselect_method)
def test_duplicate_keep_all_ties(self):
# see gh-16818
s = Series([10, 9, 8, 7, 7, 7, 7, 6])
result = s.nlargest(4, keep="all")
expected = Series([10, 9, 8, 7, 7, 7, 7])
assert_series_equal(result, expected)
result = s.nsmallest(2, keep="all")
expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6])
assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,expected", [([True, False], [True]), ([True, False, True, True], [True])]
)
def test_boolean(self, data, expected):
# GH 26154 : ensure True > False
s = Series(data)
result = s.nlargest(1)
expected = Series(expected)
assert_series_equal(result, expected)
class TestCategoricalSeriesAnalytics:
def test_count(self):
s = Series(
Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
)
result = s.count()
assert result == 2
def test_value_counts(self):
# GH 12835
cats = Categorical(list("abcccb"), categories=list("cabd"))
s = Series(cats, name="xxx")
res = s.value_counts(sort=False)
exp_index = CategoricalIndex(list("cabd"), categories=cats.categories)
exp = Series([3, 1, 2, 0], name="xxx", index=exp_index)
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp_index = CategoricalIndex(list("cbad"), categories=cats.categories)
exp = Series([3, 2, 1, 0], name="xxx", index=exp_index)
tm.assert_series_equal(res, exp)
# check that object dtype handles the Series.name the same way
# (tested in test_base.py)
s = Series(["a", "b", "c", "c", "c", "b"], name="xxx")
res = s.value_counts()
exp = Series([3, 2, 1], name="xxx", index=["c", "b", "a"])
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# see gh-9443
# sanity check
s = Series(["a", "b", "a"], dtype="category")
exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# same Series via two different constructions --> same behaviour
series = [
Series(["a", "b", None, "a", None, None], dtype="category"),
Series(
Categorical(["a", "b", None, "a", None, None], categories=["a", "b"])
),
]
for s in series:
# None is a NaN value, so we exclude its count here
exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# we don't exclude the count of None and sort by counts
exp = Series([3, 2, 1], index=CategoricalIndex([np.nan, "a", "b"]))
res = s.value_counts(dropna=False)
tm.assert_series_equal(res, exp)
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan]))
res = s.value_counts(dropna=False, sort=False)
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"dtype",
[
"int_",
"uint",
"float_",
"unicode_",
"timedelta64[h]",
pytest.param(
"datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=False)
),
],
)
def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered_fixture))
expected = Series([False, False, False, True])
tm.assert_series_equal(tc1.duplicated(), expected)
tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, False])
tm.assert_series_equal(tc1.duplicated(keep="last"), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep="last"), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep="last", inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, True])
tm.assert_series_equal(tc1.duplicated(keep=False), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered_fixture))
expected = Series([False, False, False, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(), expected)
tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, False, False, False])
tm.assert_series_equal(tc2.duplicated(keep="last"), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep="last"), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep="last", inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(keep=False), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
def test_drop_duplicates_categorical_bool(self, ordered_fixture):
tc = Series(
Categorical(
[True, False, True, False],
categories=[True, False],
ordered=ordered_fixture,
)
)
expected = Series([False, False, True, True])
tm.assert_series_equal(tc.duplicated(), expected)
tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, False, False])
tm.assert_series_equal(tc.duplicated(keep="last"), expected)
tm.assert_series_equal(tc.drop_duplicates(keep="last"), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep="last", inplace=True)
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, True, True])
tm.assert_series_equal(tc.duplicated(keep=False), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
| bsd-3-clause |
joelgrus/data-science-from-scratch | first-edition/code-python3/neural_networks.py | 8 | 6417 | from collections import Counter
from functools import partial
from linear_algebra import dot
import math, random
import matplotlib
import matplotlib.pyplot as plt
def step_function(x):
return 1 if x >= 0 else 0
def perceptron_output(weights, bias, x):
"""returns 1 if the perceptron 'fires', 0 if not"""
return step_function(dot(weights, x) + bias)
def sigmoid(t):
return 1 / (1 + math.exp(-t))
def neuron_output(weights, inputs):
return sigmoid(dot(weights, inputs))
def feed_forward(neural_network, input_vector):
"""takes in a neural network (represented as a list of lists of lists of weights)
and returns the output from forward-propagating the input"""
outputs = []
for layer in neural_network:
input_with_bias = input_vector + [1] # add a bias input
output = [neuron_output(neuron, input_with_bias) # compute the output
for neuron in layer] # for this layer
outputs.append(output) # and remember it
# the input to the next layer is the output of this one
input_vector = output
return outputs
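# Illustrative usage sketch (added; not part of the original file): a hand-built
# XOR network, as in the book's example, pushed through feed_forward. The weights
# are the classic hand-picked values, so outputs land near (not exactly on) 0 and 1.
def xor_example():
    xor_network = [# hidden layer
                   [[20., 20., -30],   # 'and' neuron
                    [20., 20., -10]],  # 'or' neuron
                   # output layer
                   [[-60., 60., -30]]] # '2nd input but not 1st' neuron
    for x in [0, 1]:
        for y in [0, 1]:
            # feed_forward returns the outputs of every layer; [-1] is the output layer
            print(x, y, [round(o, 4) for o in feed_forward(xor_network, [x, y])[-1]])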
def backpropagate(network, input_vector, target):
hidden_outputs, outputs = feed_forward(network, input_vector)
# the output * (1 - output) is from the derivative of sigmoid
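# (recall that d/dt sigmoid(t) = sigmoid(t) * (1 - sigmoid(t)))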
output_deltas = [output * (1 - output) * (output - target[i])
for i, output in enumerate(outputs)]
# adjust weights for output layer (network[-1])
for i, output_neuron in enumerate(network[-1]):
for j, hidden_output in enumerate(hidden_outputs + [1]):
output_neuron[j] -= output_deltas[i] * hidden_output
# back-propagate errors to hidden layer
hidden_deltas = [hidden_output * (1 - hidden_output) *
dot(output_deltas, [n[i] for n in network[-1]])
for i, hidden_output in enumerate(hidden_outputs)]
# adjust weights for hidden layer (network[0])
for i, hidden_neuron in enumerate(network[0]):
for j, input in enumerate(input_vector + [1]):
hidden_neuron[j] -= hidden_deltas[i] * input
def patch(x, y, hatch, color):
"""return a matplotlib 'patch' object with the specified
location, crosshatch pattern, and color"""
return matplotlib.patches.Rectangle((x - 0.5, y - 0.5), 1, 1,
hatch=hatch, fill=False, color=color)
def show_weights(neuron_idx):
weights = network[0][neuron_idx]
abs_weights = [abs(weight) for weight in weights]
grid = [abs_weights[row:(row+5)] # turn the weights into a 5x5 grid
for row in range(0,25,5)] # [weights[0:5], ..., weights[20:25]]
ax = plt.gca() # to use hatching, we'll need the axis
ax.imshow(grid, # here same as plt.imshow
cmap=matplotlib.cm.binary, # use white-black color scale
interpolation='none') # plot blocks as blocks
# cross-hatch the negative weights
for i in range(5): # row
for j in range(5): # column
if weights[5*i + j] < 0: # row i, column j = weights[5*i + j]
# add black and white hatches, so visible whether dark or light
ax.add_patch(patch(j, i, '/', "white"))
ax.add_patch(patch(j, i, '\\', "black"))
plt.show()
if __name__ == "__main__":
raw_digits = [
"""11111
1...1
1...1
1...1
11111""",
"""..1..
..1..
..1..
..1..
..1..""",
"""11111
....1
11111
1....
11111""",
"""11111
....1
11111
....1
11111""",
"""1...1
1...1
11111
....1
....1""",
"""11111
1....
11111
....1
11111""",
"""11111
1....
11111
1...1
11111""",
"""11111
....1
....1
....1
....1""",
"""11111
1...1
11111
1...1
11111""",
"""11111
1...1
11111
....1
11111"""]
def make_digit(raw_digit):
return [1 if c == '1' else 0
for row in raw_digit.split("\n")
for c in row.strip()]
inputs = list(map(make_digit, raw_digits))
targets = [[1 if i == j else 0 for i in range(10)]
for j in range(10)]
random.seed(0) # to get repeatable results
input_size = 25 # each input is a vector of length 25
num_hidden = 5 # we'll have 5 neurons in the hidden layer
output_size = 10 # we need 10 outputs for each input
# each hidden neuron has one weight per input, plus a bias weight
hidden_layer = [[random.random() for __ in range(input_size + 1)]
for __ in range(num_hidden)]
# each output neuron has one weight per hidden neuron, plus a bias weight
output_layer = [[random.random() for __ in range(num_hidden + 1)]
for __ in range(output_size)]
# the network starts out with random weights
network = [hidden_layer, output_layer]
# 10,000 iterations seems enough to converge
for __ in range(10000):
for input_vector, target_vector in zip(inputs, targets):
backpropagate(network, input_vector, target_vector)
def predict(input):
return feed_forward(network, input)[-1]
for i, input in enumerate(inputs):
outputs = predict(input)
print(i, [round(p,2) for p in outputs])
print(""".@@@.
...@@
..@@.
...@@
.@@@.""")
print([round(x, 2) for x in
predict( [0,1,1,1,0, # .@@@.
0,0,0,1,1, # ...@@
0,0,1,1,0, # ..@@.
0,0,0,1,1, # ...@@
0,1,1,1,0])]) # .@@@.
print()
print(""".@@@.
@..@@
.@@@.
@..@@
.@@@.""")
print([round(x, 2) for x in
predict( [0,1,1,1,0, # .@@@.
1,0,0,1,1, # @..@@
0,1,1,1,0, # .@@@.
1,0,0,1,1, # @..@@
0,1,1,1,0])]) # .@@@.
print()
| mit |
deepchem/deepchem | contrib/DiabeticRetinopathy/model.py | 5 | 8069 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 10 06:12:10 2018
@author: zqwu
"""
import numpy as np
import tensorflow as tf
from deepchem.data import NumpyDataset, pad_features
from deepchem.metrics import to_one_hot
from deepchem.models.tensorgraph.layers import Layer, Dense, SoftMax, Reshape, \
SparseSoftMaxCrossEntropy, BatchNorm, Conv2D, MaxPool2D, WeightedError, \
Dropout, ReLU, Stack, Flatten, ReduceMax, WeightDecay
from deepchem.models.tensorgraph.layers import L2Loss, Label, Weights, Feature
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.trans import undo_transforms
from deepchem.data.data_loader import ImageLoader
from sklearn.metrics import confusion_matrix, accuracy_score
class DRModel(TensorGraph):
def __init__(self,
n_tasks=1,
image_size=512,
n_downsample=6,
n_init_kernel=16,
n_fully_connected=[1024],
n_classes=5,
augment=False,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks
image_size: int
Resolution of the input images (assumed square)
n_downsample: int
Number of downsampling stages; each stage halves the spatial resolution, for a factor of 2**n_downsample overall
n_init_kernel: int
Number of convolution kernels (filters) in the first convolutional layer; later blocks scale this up by powers of 2
n_fully_connected: list of int
Sizes of the fully connected layers applied after the convolutions
n_classes: int
Number of classes to predict (only used in classification mode)
augment: bool
Whether to apply data augmentation
"""
self.n_tasks = n_tasks
self.image_size = image_size
self.n_downsample = n_downsample
self.n_init_kernel = n_init_kernel
self.n_fully_connected = n_fully_connected
self.n_classes = n_classes
self.augment = augment
super(DRModel, self).__init__(**kwargs)
self.build_graph()
def build_graph(self):
# inputs placeholder
self.inputs = Feature(
shape=(None, self.image_size, self.image_size, 3), dtype=tf.float32)
# data preprocessing and augmentation
in_layer = DRAugment(
self.augment,
self.batch_size,
size=(self.image_size, self.image_size),
in_layers=[self.inputs])
# first conv layer
in_layer = Conv2D(
self.n_init_kernel,
kernel_size=7,
activation_fn=None,
in_layers=[in_layer])
in_layer = BatchNorm(in_layers=[in_layer])
in_layer = ReLU(in_layers=[in_layer])
# downsample by max pooling
res_in = MaxPool2D(
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], in_layers=[in_layer])
for ct_module in range(self.n_downsample - 1):
# each module is a residual convolutional block
# followed by a convolutional downsample layer
in_layer = Conv2D(
self.n_init_kernel * 2**(ct_module - 1),
kernel_size=1,
activation_fn=None,
in_layers=[res_in])
in_layer = BatchNorm(in_layers=[in_layer])
in_layer = ReLU(in_layers=[in_layer])
in_layer = Conv2D(
self.n_init_kernel * 2**(ct_module - 1),
kernel_size=3,
activation_fn=None,
in_layers=[in_layer])
in_layer = BatchNorm(in_layers=[in_layer])
in_layer = ReLU(in_layers=[in_layer])
in_layer = Conv2D(
self.n_init_kernel * 2**ct_module,
kernel_size=1,
activation_fn=None,
in_layers=[in_layer])
res_a = BatchNorm(in_layers=[in_layer])
res_out = res_in + res_a
res_in = Conv2D(
self.n_init_kernel * 2**(ct_module + 1),
kernel_size=3,
stride=2,
in_layers=[res_out])
res_in = BatchNorm(in_layers=[res_in])
# max pooling over the final outcome
in_layer = ReduceMax(axis=(1, 2), in_layers=[res_in])
for layer_size in self.n_fully_connected:
# fully connected layers
in_layer = Dense(
layer_size, activation_fn=tf.nn.relu, in_layers=[in_layer])
# dropout for dense layers
#in_layer = Dropout(0.25, in_layers=[in_layer])
logit_pred = Dense(
self.n_tasks * self.n_classes, activation_fn=None, in_layers=[in_layer])
logit_pred = Reshape(
shape=(None, self.n_tasks, self.n_classes), in_layers=[logit_pred])
weights = Weights(shape=(None, self.n_tasks))
labels = Label(shape=(None, self.n_tasks), dtype=tf.int32)
output = SoftMax(logit_pred)
self.add_output(output)
loss = SparseSoftMaxCrossEntropy(in_layers=[labels, logit_pred])
weighted_loss = WeightedError(in_layers=[loss, weights])
# weight decay regularizer
# weighted_loss = WeightDecay(0.1, 'l2', in_layers=[weighted_loss])
self.set_loss(weighted_loss)
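# Illustrative construction sketch (added; not from the original file): a tiny
# in-memory dataset standing in for ImageLoader output, wired to DRModel. The
# array shapes and hyperparameters below are placeholders, not tuned values.
def _build_model_example():
  X = np.random.rand(4, 512, 512, 3)        # 4 dummy RGB "fundus images"
  y = np.random.randint(0, 5, size=(4, 1))  # one severity grade per image
  dataset = NumpyDataset(X, y, np.ones((4, 1)))
  model = DRModel(n_tasks=1, image_size=512, n_init_kernel=16,
                  augment=False, batch_size=2)
  # model.fit(dataset, nb_epoch=1)  # training call, left commented in this sketch
  return model, dataset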
def DRAccuracy(y, y_pred):
y_pred = np.argmax(y_pred, 1)
return accuracy_score(y, y_pred)
def DRSpecificity(y, y_pred):
y_pred = (np.argmax(y_pred, 1) > 0) * 1
y = (y > 0) * 1
TN = sum((1 - y_pred) * (1 - y))
N = sum(1 - y)
return float(TN) / N
def DRSensitivity(y, y_pred):
y_pred = (np.argmax(y_pred, 1) > 0) * 1
y = (y > 0) * 1
TP = sum(y_pred * y)
P = sum(y)
return float(TP) / P
def ConfusionMatrix(y, y_pred):
y_pred = np.argmax(y_pred, 1)
return confusion_matrix(y, y_pred)
def QuadWeightedKappa(y, y_pred):
y_pred = np.argmax(y_pred, 1)
cm = confusion_matrix(y, y_pred)
classes_y, counts_y = np.unique(y, return_counts=True)
classes_y_pred, counts_y_pred = np.unique(y_pred, return_counts=True)
E = np.zeros((classes_y.shape[0], classes_y.shape[0]))
for i, c1 in enumerate(classes_y):
for j, c2 in enumerate(classes_y_pred):
E[c1, c2] = counts_y[i] * counts_y_pred[j]
E = E / np.sum(E) * np.sum(cm)
w = np.zeros((classes_y.shape[0], classes_y.shape[0]))
for i in range(classes_y.shape[0]):
for j in range(classes_y.shape[0]):
w[i, j] = float((i - j)**2) / (classes_y.shape[0] - 1)**2
re = 1 - np.sum(w * cm) / np.sum(w * E)
return re
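# Minimal usage sketch for the metric helpers above (added; not from the
# original file). `probs` stands in for DRModel's softmax output with shape
# (n_samples, n_classes); here it is just one-hot fake predictions.
def _metrics_example():
  y = np.array([0, 1, 2, 2, 0])
  probs = np.eye(5)[np.array([0, 1, 1, 2, 0])]
  return (DRAccuracy(y, probs), DRSensitivity(y, probs),
          DRSpecificity(y, probs), QuadWeightedKappa(y, probs))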
class DRAugment(Layer):
def __init__(self,
augment,
batch_size,
distort_color=True,
central_crop=True,
size=(512, 512),
**kwargs):
"""
Parameters
----------
augment: bool
Whether to apply data augmentation
batch_size: int
Number of images in the batch
distort_color: bool
Whether to apply random distortions to the color
central_crop: bool
Whether to randomly crop the sample around the center
size: tuple of int
Resolution of the input images (assumed square)
"""
self.augment = augment
self.batch_size = batch_size
self.distort_color = distort_color
self.central_crop = central_crop
self.size = size
super(DRAugment, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
parent_tensor = inputs[0]
training = kwargs['training'] if 'training' in kwargs else 1.0
parent_tensor = parent_tensor / 255.0
if not self.augment:
out_tensor = parent_tensor
else:
def preprocess(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
img = tf.image.rot90(img, k=np.random.randint(0, 4))
if self.distort_color:
img = tf.image.random_brightness(img, max_delta=32. / 255.)
img = tf.image.random_saturation(img, lower=0.5, upper=1.5)
img = tf.clip_by_value(img, 0.0, 1.0)
if self.central_crop:
# sample cut ratio from a clipped gaussian
img = tf.image.central_crop(img,
np.clip(
np.random.normal(1., 0.06), 0.8, 1.))
img = tf.image.resize_bilinear(
tf.expand_dims(img, 0), tf.convert_to_tensor(self.size))[0]
return img
outs = tf.map_fn(preprocess, parent_tensor)
# train/valid differences
out_tensor = training * outs + (1 - training) * parent_tensor
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
| mit |
thaole16/Boids | boids/boids.py | 1 | 4866 | """
A refactored implementation of Boids from a deliberately bad implementation of
[Boids](http://dl.acm.org/citation.cfm?doid=37401.37406): an exercise for class.
"""
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
class Boids(object):
def __init__(self,
boid_count=50,
x_positions=[-450, 50.0],
y_positions=[300.0, 600.0],
x_velocities=[0, 10.0],
y_velocities=[-20.0, 20.0],
move_to_middle_strength=0.01,
alert_distance=100,
formation_flying_distance=10000,
formation_flying_strength=0.125):
self.boid_count = boid_count
self.move_to_middle_strength = move_to_middle_strength
self.alert_distance = alert_distance
self.formation_flying_distance = formation_flying_distance
self.formation_flying_strength = formation_flying_strength
self.boids_x = np.random.uniform(size=boid_count, *x_positions)
self.boids_y = np.random.uniform(size=boid_count, *y_positions)
self.positions = np.stack((self.boids_x, self.boids_y))
self.boid_x_velocities = np.random.uniform(size=boid_count, *x_velocities)
self.boid_y_velocities = np.random.uniform(size=boid_count, *y_velocities)
self.velocities = np.stack((self.boid_x_velocities, self.boid_y_velocities))
self.boids = (self.positions, self.velocities)
def fly_towards_the_middle(self, boids, move_to_middle_strength=0.01):
(positions, velocities) = boids
middle = np.mean(positions, 1)
move_to_middle = (middle[:, np.newaxis] - positions) * move_to_middle_strength
velocities += move_to_middle
def separation(self, coords):
separations = np.array(coords)[:, np.newaxis, :] - np.array(coords)[:, :, np.newaxis]
separation_distance_squared = separations[0, :, :] ** 2 + separations[1, :, :] ** 2
return separations, separation_distance_squared
def fly_away_from_nearby_boids(self, boids, alert_distance=100):
(positions, velocities) = boids
separations, separation_distance_squared = self.separation(positions)
birds_outside_alert = separation_distance_squared > alert_distance
close_separations = np.copy(separations)
close_separations[0, :, :][birds_outside_alert] = 0 # x positions
close_separations[1, :, :][birds_outside_alert] = 0 # y positions
velocities += np.sum(close_separations, 1)
def match_speed_with_nearby_boids(self, boids,
formation_flying_distance=10000,
formation_flying_strength=0.125):
(positions, velocities) = boids
separations, separation_distance_squared = self.separation(positions)
birds_outside_formation = separation_distance_squared > formation_flying_distance
velocity_difference = velocities[:, np.newaxis, :] - velocities[:, :, np.newaxis]
close_formation = np.copy(velocity_difference)
close_formation[0, :, :][birds_outside_formation] = 0
close_formation[1, :, :][birds_outside_formation] = 0
velocities += -1 * np.mean(close_formation, 1) * formation_flying_strength
def update_boids(self, boids):
(positions, velocities) = boids
# Fly towards the middle
self.fly_towards_the_middle(boids, self.move_to_middle_strength)
# Fly away from nearby boids
self.fly_away_from_nearby_boids(boids, self.alert_distance)
# Try to match speed with nearby boids
self.match_speed_with_nearby_boids(boids, self.formation_flying_distance, self.formation_flying_strength)
# Update positions
positions += velocities
def _animate(self, frame):
self.update_boids(self.boids)
(positions, velocities) = self.boids
self.scatter.set_offsets(np.transpose(positions))
def model(self, xlim=(-500, 1500), ylim=(-500, 1500), frames=50, interval=50, savefile=None):
colors = np.random.rand(self.boid_count)
boidsize = np.pi * (2 * np.random.rand(self.boid_count) + 2) ** 2
figure = plt.figure()
axes = plt.axes(xlim=xlim, ylim=ylim)
self.scatter = axes.scatter(self.boids_x, self.boids_y,
s=boidsize, c=colors, alpha=0.5, edgecolors=None)
anim = animation.FuncAnimation(figure, self._animate,
frames=frames, interval=interval)
plt.xlabel('x (arbitrary units)')
plt.ylabel('y (arbitrary units)')
plt.title("Boids a'Flocking")
if savefile is not None:
anim.save(savefile)
plt.show()
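# Shape sketch (added; not part of the original class): what Boids.separation
# returns for a tiny flock, to make the broadcasting explicit.
def separation_shapes_demo():
    flock = Boids(boid_count=3)
    separations, dist_sq = flock.separation(flock.positions)
    # separations: (2, 3, 3) pairwise coordinate differences (x and y planes);
    # dist_sq: (3, 3) matrix of squared pairwise distances.
    return separations.shape, dist_sq.shape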
if __name__ == "__main__":
boidsobject = Boids()
boidsobject.model()
| mit |
gregreen/bayestar | scripts/resample_los.py | 1 | 19922 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# resample_los.py
#
# Copyright 2013 Greg Green <greg@greg-UX31A>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import numpy as np
import healpy as hp
import h5py
import hputils
import maptools
import model
def gc_dist_all(l, b):
l = np.pi / 180. * l
b = np.pi / 180. * b
l_0 = np.reshape(l, (1, l.size))
l_0 = np.repeat(l_0, l.size, axis=0)
l_1 = np.reshape(l, (l.size, 1))
l_1 = np.repeat(l_1, l.size, axis=1)
b_0 = np.reshape(b, (1, b.size))
b_0 = np.repeat(b_0, b.size, axis=0)
b_1 = np.reshape(b, (b.size, 1))
b_1 = np.repeat(b_1, b.size, axis=1)
#d = np.arccos(np.sin(b_0) * np.sin(b_1) + np.cos(b_0) * np.cos(b_1) * np.cos(l_1 - l_0))
d = np.arcsin(np.sqrt(np.sin(0.5*(b_1-b_0))**2 + np.cos(b_0) * np.cos(b_1) * np.sin(0.5*(l_1-l_0))**2))
return d
def gc_dist(l_source, b_source, l_dest, b_dest):
l_s = np.pi / 180. * l_source
b_s = np.pi / 180. * b_source
l_d = np.pi / 180. * l_dest
b_d = np.pi / 180. * b_dest
l_0 = np.reshape(l_s, (l_source.size, 1))
l_0 = np.repeat(l_0, l_dest.size, axis=1)
l_1 = np.reshape(l_d, (1, l_dest.size))
l_1 = np.repeat(l_1, l_source.size, axis=0)
b_0 = np.reshape(b_s, (b_source.size, 1))
b_0 = np.repeat(b_0, b_dest.size, axis=1)
b_1 = np.reshape(b_d, (1, b_dest.size))
b_1 = np.repeat(b_1, b_source.size, axis=0)
#d = np.arccos(np.sin(b_0) * np.sin(b_1) + np.cos(b_0) * np.cos(b_1) * np.cos(l_1 - l_0))
d = np.arcsin(np.sqrt(np.sin(0.5*(b_1-b_0))**2 + np.cos(b_0) * np.cos(b_1) * np.sin(0.5*(l_1-l_0))**2))
return d
def find_neighbors_naive(nside, pix_idx, n_neighbors):
'''
Find the neighbors of each pixel using a naive algorithm.
Each pixel is defined by a HEALPix nside and pixel index (in nested
order).
Returns two arrays:
neighbor_idx (n_pix, n_neighbors): index of each neighbor in the nside and pix_idx arrays.
neighbor_dist (n_pix, n_neighbors): distance to each neighbor.
'''
# Determine (l, b) of all pixels
l = np.empty(pix_idx.size, dtype='f8')
b = np.empty(pix_idx.size, dtype='f8')
nside_unique = np.unique(nside)
for n in nside_unique:
idx = (nside == n)
l[idx], b[idx] = hputils.pix2lb(n, pix_idx[idx], nest=True)
# Determine distances between all pixel pairs
dist = gc_dist_all(l, b)
# Determine closest neighbors
sort_idx = np.argsort(dist, axis=1)
neighbor_idx = sort_idx[:, 1:n_neighbors+1]
neighbor_dist = np.sort(dist, axis=1)[:, 1:n_neighbors+1]
return neighbor_idx, neighbor_dist
def find_neighbors(nside, pix_idx, n_neighbors):
'''
Find the neighbors of each pixel.
Each pixel is defined by a HEALPix nside and pixel index (in nested
order).
Returns two arrays:
neighbor_idx (n_pix, n_neighbors): index of each neighbor in the nside and pix_idx arrays.
neighbor_dist (n_pix, n_neighbors): distance to each neighbor.
'''
# Determine (l, b) and which downsampled pixel
# each (nside, pix_idx) combo belongs to
nside_rough = np.min(nside)
if nside_rough != 1:
nside_rough /= 2
l = np.empty(pix_idx.size, dtype='f8')
b = np.empty(pix_idx.size, dtype='f8')
pix_idx_rough = np.empty(pix_idx.size, dtype='i8')
nside_unique = np.unique(nside)
for n in nside_unique:
idx = (nside == n)
factor = (n / nside_rough)**2
pix_idx_rough[idx] = pix_idx[idx] / factor
l[idx], b[idx] = hputils.pix2lb(n, pix_idx[idx], nest=True)
# For each downsampled pixel, determine nearest neighbors of all subpixels
neighbor_idx = -np.ones((pix_idx.size, n_neighbors), dtype='i8')
neighbor_dist = np.inf * np.ones((pix_idx.size, n_neighbors), dtype='f8')
for i_rough in np.unique(pix_idx_rough):
rough_neighbors = hp.get_all_neighbours(nside_rough, i_rough, nest=True)
idx_centers = np.argwhere(pix_idx_rough == i_rough)[:,0]
tmp = [np.argwhere(pix_idx_rough == i)[:,0] for i in rough_neighbors]
tmp.append(idx_centers)
idx_search = np.hstack(tmp)
dist = gc_dist(l[idx_centers], b[idx_centers],
l[idx_search], b[idx_search])
tmp = np.argsort(dist, axis=1)[:, 1:n_neighbors+1]
fill = idx_search[tmp]
neighbor_idx[idx_centers, :fill.shape[1]] = fill
fill = np.sort(dist, axis=1)[:, 1:n_neighbors+1]
neighbor_dist[idx_centers, :fill.shape[1]] = fill
return neighbor_idx, neighbor_dist
def find_nearest_neighbors(nside, pix_idx):
# TODO
# Determine mapping of pixel indices at highest resolutions to index
# in the array pix_idx
nside_max = np.max(nside)
pix_idx_highres = []
for n in np.unique(nside):
idx = (nside == n)
pix_idx
l = np.empty(pix_idx.size, dtype='f8')
b = np.empty(pix_idx.size, dtype='f8')
neighbor_idx = np.empty((8, pix_idx.size), dtype='i8')
neighbor_dist = np.empty((8, pix_idx.size), dtype='f8')
for n in np.unique(nside):
idx = (nside == n)
l[idx], b[idx] = hputils.pix2lb(n, pix_idx[idx], nest=True)
neighbor_idx[:, idx] = 1
def test_gc_dist():
l = np.array([0., 1., 2., 359.])
b = np.array([0., 30., 60., 90.])
d_0 = gc_dist_all(l, b) * 180. / np.pi
d_1 = gc_dist(l, b, l, b) * 180. / np.pi
print d_1
print d_0
idx = np.argsort(d_1, axis=0)
print d_1 - d_0
def test_find_neighbors():
nside = 128
n_neighbors = 16
n_pix = hp.nside2npix(nside)
pix_idx = np.arange(n_pix)
nside_arr = np.ones(n_pix, dtype='i8') * nside
#neighbor_idx, neighbor_dist = find_neighbors_naive(nside_arr, pix_idx,
# n_neighbors)
neighbor_idx, neighbor_dist = find_neighbors(nside_arr, pix_idx,
n_neighbors)
#print neighbor_idx
#print neighbor_dist
import matplotlib.pyplot as plt
m = np.zeros(n_pix)
idx = np.random.randint(n_pix)
#idx = 1
print idx
print neighbor_idx[idx, :]
print neighbor_dist[idx, :]
m[idx] = 2
m[neighbor_idx[idx, :]] = 1
hp.visufunc.mollview(m, nest=True)
plt.show()
def test_find_neighbors_adaptive_res():
n_neighbors = 36
n_disp = 5
processes = 4
bounds = [60., 80., -5., 5.]
import glob
fnames = glob.glob('/n/fink1/ggreen/bayestar/output/l70/l70.*.h5')
los_coll = maptools.los_collection(fnames, bounds=bounds,
processes=processes)
nside, pix_idx = los_coll.nside, los_coll.pix_idx
print 'Finding neighbors ...'
neighbor_idx, neighbor_dist = find_neighbors(nside, pix_idx,
n_neighbors)
print 'Done.'
# Highlight a couple of nearest-neighbor sections
pix_val = np.zeros(pix_idx.size)
l = 0.025 * np.pi / 180.
print l
for k in xrange(n_disp):
idx = np.random.randint(pix_idx.size)
pix_val[idx] = 2
pix_val[neighbor_idx[idx, :]] = 1
d = neighbor_dist[idx, :]
print 'Center pixel: %d' % idx
print d
print np.exp(-(d/l)**2.)
print ''
nside_max, pix_idx_exp, pix_val_exp = maptools.reduce_to_single_res(pix_idx,
nside,
pix_val)
size = (2000, 2000)
img, bounds, xy_bounds = hputils.rasterize_map(pix_idx_exp, pix_val_exp,
nside_max, size)
# Plot nearest neighbors
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.imshow(img, extent=bounds, origin='lower', aspect='auto',
interpolation='nearest')
plt.show()
def get_prior_ln_Delta_EBV(nside, pix_idx, n_regions=30):
# Find (l, b) for each pixel
l = np.empty(pix_idx.size, dtype='f8')
b = np.empty(pix_idx.size, dtype='f8')
for n in np.unique(nside):
idx = (nside == n)
l[idx], b[idx] = hputils.pix2lb(n, pix_idx[idx], nest=True)
# Determine priors in each pixel
gal_model = model.TGalacticModel()
ln_Delta_EBV = np.empty((pix_idx.size, n_regions+1), dtype='f8')
for i,(ll,bb) in enumerate(zip(l, b)):
ret = gal_model.EBV_prior(ll, bb, n_regions=n_regions)
ln_Delta_EBV[i, :] = ret[1]
return ln_Delta_EBV
class map_resampler:
def __init__(self, fnames, bounds=None,
processes=1,
n_neighbors=32,
corr_length_core=0.25,
corr_length_tail=1.,
max_corr=1.,
tail_weight=0.,
dist_floor=0.1):
self.n_neighbors = n_neighbors
self.corr_length_core = corr_length_core
self.corr_length_tail = corr_length_tail
self.max_corr = max_corr
self.tail_weight = tail_weight
self.los_coll = maptools.los_collection(fnames, bounds=bounds,
processes=processes)
self.nside = self.los_coll.nside
self.pix_idx = self.los_coll.pix_idx
print 'Finding neighbors ...'
self.neighbor_idx, self.neighbor_ang_dist = find_neighbors(self.nside,
self.pix_idx,
self.n_neighbors)
# Determine difference from priors
self.delta = np.log(self.los_coll.los_delta_EBV)
self.n_pix, self.n_samples, self.n_slices = self.delta.shape
print 'Calculating priors ...'
ln_Delta_EBV_prior = get_prior_ln_Delta_EBV(self.nside,
self.pix_idx,
n_regions=self.n_slices-1)
#print ''
#print 'priors:'
#print ln_Delta_EBV_prior[0, :]
for n in xrange(self.n_samples):
self.delta[:, n, :] -= ln_Delta_EBV_prior
self.delta /= 1.5 # Standardize to units of the std. dev. on the priors
#print ''
#print 'delta:'
#print self.delta
# Distance in pc to each bin
slice_dist = np.power(10., self.los_coll.los_mu_anchor/5. + 1.)
slice_dist = np.hstack([[0.], slice_dist])
self.bin_dist = 0.5 * (slice_dist[:-1] + slice_dist[1:])
# Determine physical distance of each voxel to its neighbors
# shape = (pix, neighbor, slice)
print 'Determining neighbor correlation weights ...'
self.neighbor_dist = np.einsum('ij,k->ijk', self.neighbor_ang_dist, self.bin_dist)
self.neighbor_dist = np.sqrt(self.neighbor_dist * self.neighbor_dist + dist_floor * dist_floor)
self.neighbor_corr = self.corr_of_dist(self.neighbor_dist)
#self.neighbor_corr = self.hard_sphere_corr(self.neighbor_dist, self.corr_length_core)
#print ''
#print 'dist:'
#print self.bin_dist
#print ''
#print 'corr:'
#print self.neighbor_corr
#print ''
# Set initial state
print 'Randomizing initial state ...'
self.beta = 1.
self.chain = []
self.randomize()
self.log_state()
self.update_order = np.arange(self.n_pix)
def set_temperature(self, T):
self.beta = 1. / T
def corr_of_dist(self, d):
# Gaussian core blended with a heavier sech tail, scaled to max_corr
core = np.exp(-0.5 * (d/self.corr_length_core)**2)
tail = 1. / np.cosh(d / self.corr_length_tail)
return self.max_corr * ((1. - self.tail_weight) * core + self.tail_weight * tail)
def hard_sphere_corr(self, d, d_max):
return self.max_corr * (d < d_max)
def randomize(self):
self.sel_idx = np.random.randint(self.n_samples, size=self.n_pix)
def update_pixel(self, idx):
'''
Update one pixel, using a Gibbs step.
'''
n_idx = self.neighbor_idx[idx, :]
delta = self.delta[idx, :, :] # (sample, slice)
n_delta = self.delta[n_idx, self.sel_idx[n_idx], :] # (neighbor, slice)
n_corr = self.neighbor_corr[idx, :, :] # (neighbor, slice)
#print delta.shape
#print n_delta.shape
#print n_corr.shape
p = np.einsum('ij,nj->i', delta, n_delta * n_corr)
p -= np.einsum('ij,nj->i', delta*delta, n_corr)
p -= np.sum(n_delta*n_delta * n_corr)
p = np.exp(0.5 * self.beta * p)
#p = np.exp(0.5 * np.einsum('ij,nj->i', delta, n_delta * n_corr))
p /= np.sum(p)
if idx == 0:
#print delta
#print n_delta
print self.neighbor_dist[idx, :, :]
print n_corr
#print p_norm
print np.min(p), np.percentile(p, 5.), np.median(p), np.percentile(p, 95.), np.max(p)
P = np.cumsum(p)
new_sample = np.sum(P < np.random.random())
self.sel_idx[idx] = new_sample
def round_robin(self):
'''
Update all pixels in a random order and add
the resulting state to the chain.
'''
np.random.shuffle(self.update_order)
for n in self.update_order:
self.update_pixel(n)
self.log_state()
print self.sel_idx[:5]
print np.array(self.chain)[:,0]
print ''
def clear_chain(self):
self.chain = []
def log_state(self):
'''
Add the current state of the map to the chain.
'''
self.chain.append(self.sel_idx.copy())
def save_resampled(self, fname, n_samples=None):
'''
Save the resampled map to an HDF5 file, with
one dataset containing the map samples, and
another dataset containing the pixel locations.
'''
n_chain = len(self.chain)
if n_samples is None:
n_samples = n_chain
elif n_samples > n_chain:
n_samples = n_chain
# Pick a set of samples to return
chain_idx = np.arange(n_chain)
#np.random.shuffle(chain_idx)
#chain_idx = chain_idx[:n_samples]
print chain_idx.shape
# Translate chain sample indices to pixel sample indices
sample_idx = np.array(self.chain)[chain_idx]
print sample_idx.shape
# Create a data cube with the chosen samples
# (sample, pixel, slice)
data = np.empty((n_chain, self.n_pix, self.n_slices), dtype='f8')
m = np.arange(self.n_pix)
print data.shape
print self.los_coll.los_delta_EBV.shape
for n,idx in enumerate(sample_idx):
data[n, :, :] = self.los_coll.los_delta_EBV[m, idx, :]
# Store locations to a record array
loc = np.empty(self.n_pix, dtype=[('nside', 'i4'), ('pix_idx', 'i8')])
loc['nside'][:] = self.nside
loc['pix_idx'][:] = self.pix_idx
# Write to file
f = h5py.File(fname, 'w')
dset = f.create_dataset('/Delta_EBV', data.shape, 'f4',
chunks=(2, data.shape[1], data.shape[2]),
compression='gzip',
compression_opts=9)
dset[:,:,:] = data[:,:,:]
dset.attrs['DM_min'] = self.los_coll.DM_min
dset.attrs['DM_max'] = self.los_coll.DM_max
dset = f.create_dataset('/location', loc.shape, loc.dtype,
compression='gzip', compression_opts=9)
dset[:] = loc[:]
f.close()
def test_map_resampler():
n_steps = 1000
n_neighbors = 12
processes = 4
bounds = [60., 80., -5., 5.]
import glob
fnames = glob.glob('/n/fink1/ggreen/bayestar/output/l70/l70.*.h5')
resampler = map_resampler(fnames, bounds=bounds,
processes=processes,
n_neighbors=n_neighbors)
print 'Resampling map ...'
for n in xrange(n_steps):
print 'step %d' % n
resampler.round_robin()
outfname = '/n/home09/ggreen/projects/bayestar/output/resample_test_4.h5'
resampler.save_resampled(outfname)
def test_plot_resampled_map():
infname = '/n/home09/ggreen/projects/bayestar/output/resample_test_4.h5'
plot_fname = '/nfs_pan1/www/ggreen/maps/l70/resampled_4'
size = (2000, 2000)
# Load in chain
f = h5py.File(infname, 'r')
loc = f['/location'][:]
chain = f['/Delta_EBV'][:,:,:] # (sample, pixel, slice)
DM_min = f['/Delta_EBV'].attrs['DM_min']
DM_max = f['/Delta_EBV'].attrs['DM_max']
f.close()
nside = loc[:]['nside']
pix_idx = loc[:]['pix_idx']
# Rasterize each sample and plot
import matplotlib.pyplot as plt
for n,sample in enumerate(chain):
print 'Plotting sample %d ...' % n
pix_val = np.sum(sample[:, :12], axis=1)
nside_max, pix_idx_exp, pix_val_exp = maptools.reduce_to_single_res(pix_idx,
nside,
pix_val)
img, bounds, xy_bounds = hputils.rasterize_map(pix_idx_exp, pix_val_exp,
nside_max, size)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.imshow(img.T, extent=bounds, origin='lower', aspect='auto',
interpolation='nearest')
fname = '%s.%.5d.png' % (plot_fname, n)
fig.savefig(fname, dpi=300)
plt.close(fig)
del img
print 'Done.'
def main():
#test_gc_dist()
#test_find_neighbors_adaptive_res()
test_map_resampler()
test_plot_resampled_map()
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
davidcdupuis/psf | DeepLearning/model-example-2/nn.py | 1 | 11179 | # coding: utf-8
# # San Francisco Crime prediction
# # Based on 2 layer neural net and count featurizer
# In[1]:
import pandas as pd
import numpy as np
from datetime import datetime
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
import matplotlib.pylab as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn import preprocessing
from sklearn.metrics import log_loss
from sklearn.metrics import make_scorer
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.linear_model import LogisticRegression
from matplotlib.colors import LogNorm
from sklearn.decomposition import PCA
# from keras.layers.advanced_activations import PReLU
# from keras.layers.core import Dense, Dropout, Activation
# from keras.layers.normalization import BatchNormalization
# from keras.models import Sequential
# from keras.utils import np_utils
from copy import deepcopy
# %matplotlib inline
# Import data
# In[2]:
trainDF=pd.read_csv("../input/train.csv")
# Clean up wrong X and Y values (very few of them)
# In[3]:
xy_scaler=preprocessing.StandardScaler()
xy_scaler.fit(trainDF[["X","Y"]])
trainDF[["X","Y"]]=xy_scaler.transform(trainDF[["X","Y"]])
trainDF=trainDF[abs(trainDF["Y"])<100]
trainDF.index=range(len(trainDF))
# plt.plot(trainDF["X"],trainDF["Y"],'.')
# plt.show()
# Make plots for each crime label
# In[4]:
# NX=100
# NY=100
# groups = trainDF.groupby('Category')
# ii=1
# plt.figure(figsize=(20, 20))
# for name, group in groups:
# plt.subplot(8,5,ii)
# histo, xedges, yedges = np.histogram2d(np.array(group.X),np.array(group.Y), bins=(NX,NY))
# myextent =[xedges[0],xedges[-1],yedges[0],yedges[-1]]
# plt.imshow(histo.T,origin='low',extent=myextent,interpolation='nearest',aspect='auto',norm=LogNorm())
# plt.title(name)
# # plt.figure(ii)
# # plt.plot(group.X,group.Y,'.')
# ii+=1
# del groups
# # Now proceed as before
# In[5]:
def parse_time(x):
DD=datetime.strptime(x,"%Y-%m-%d %H:%M:%S")
time=DD.hour#*60+DD.minute
day=DD.day
month=DD.month
year=DD.year
return time,day,month,year
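# Quick example of the parser above (added comment):
# parse_time("2015-05-13 23:53:00") -> (23, 13, 5, 2015)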
def get_season(x):
# x is the 1-based month (1-12) returned by parse_time, so the season bins are 1-based as well
summer=0
fall=0
winter=0
spring=0
if (x in [6, 7, 8]):
summer=1
if (x in [9, 10, 11]):
fall=1
if (x in [12, 1, 2]):
winter=1
if (x in [3, 4, 5]):
spring=1
return summer, fall, winter, spring
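# Quick example of the helper above (added comment): get_season(1) returns
# (summer, fall, winter, spring) = (0, 0, 1, 0), i.e. January counts as winter.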
# In[6]:
def parse_data(df,logodds,logoddsPA):
feature_list=df.columns.tolist()
if "Descript" in feature_list:
feature_list.remove("Descript")
if "Resolution" in feature_list:
feature_list.remove("Resolution")
if "Category" in feature_list:
feature_list.remove("Category")
if "Id" in feature_list:
feature_list.remove("Id")
cleanData=df[feature_list]
cleanData.index=range(len(df))
print("Creating address features")
address_features=cleanData["Address"].apply(lambda x: logodds[x])
address_features.columns=["logodds"+str(x) for x in range(len(address_features.columns))]
print("Parsing dates")
cleanData["Time"], cleanData["Day"], cleanData["Month"], cleanData["Year"]=zip(*cleanData["Dates"].apply(parse_time))
# dummy_ranks_DAY = pd.get_dummies(cleanData['DayOfWeek'], prefix='DAY')
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
# cleanData["DayOfWeek"]=cleanData["DayOfWeek"].apply(lambda x: days.index(x)/float(len(days)))
print("Creating one-hot variables")
dummy_ranks_PD = pd.get_dummies(cleanData['PdDistrict'], prefix='PD')
dummy_ranks_DAY = pd.get_dummies(cleanData["DayOfWeek"], prefix='DAY')
cleanData["IsInterection"]=cleanData["Address"].apply(lambda x: 1 if "/" in x else 0)
cleanData["logoddsPA"]=cleanData["Address"].apply(lambda x: logoddsPA[x])
print("droping processed columns")
cleanData=cleanData.drop("PdDistrict",axis=1)
cleanData=cleanData.drop("DayOfWeek",axis=1)
cleanData=cleanData.drop("Address",axis=1)
cleanData=cleanData.drop("Dates",axis=1)
feature_list=cleanData.columns.tolist()
print("joining one-hot features")
features = cleanData[feature_list].join(dummy_ranks_PD.ix[:,:]).join(dummy_ranks_DAY.ix[:,:]).join(address_features.ix[:,:])
print("creating new features")
features["IsDup"]=pd.Series(features.duplicated()|features.duplicated(take_last=True)).apply(int)
features["Awake"]=features["Time"].apply(lambda x: 1 if (x==0 or (x>=8 and x<=23)) else 0)
features["Summer"], features["Fall"], features["Winter"], features["Spring"]=zip(*features["Month"].apply(get_season))
if "Category" in df.columns:
labels = df["Category"].astype('category')
# label_names=labels.unique()
# labels=labels.cat.rename_categories(range(len(label_names)))
else:
labels=None
return features,labels
# This part is slower than it needs to be.
# In[7]:
addresses=sorted(trainDF["Address"].unique())
categories=sorted(trainDF["Category"].unique())
C_counts=trainDF.groupby(["Category"]).size()
A_C_counts=trainDF.groupby(["Address","Category"]).size()
A_counts=trainDF.groupby(["Address"]).size()
logodds={}
logoddsPA={}
MIN_CAT_COUNTS=2
default_logodds=np.log(C_counts/len(trainDF))-np.log(1.0-C_counts/float(len(trainDF)))
for addr in addresses:
PA=A_counts[addr]/float(len(trainDF))
logoddsPA[addr]=np.log(PA)-np.log(1.-PA)
logodds[addr]=deepcopy(default_logodds)
for cat in A_C_counts[addr].keys():
if (A_C_counts[addr][cat]>MIN_CAT_COUNTS) and A_C_counts[addr][cat]<A_counts[addr]:
PA=A_C_counts[addr][cat]/float(A_counts[addr])
logodds[addr][categories.index(cat)]=np.log(PA)-np.log(1.0-PA)
logodds[addr]=pd.Series(logodds[addr])
logodds[addr].index=range(len(categories))
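# What the loop above builds (added note): logoddsPA[addr] is the log-odds that
# an incident occurs at `addr` at all, i.e. log(PA) - log(1 - PA), while
# logodds[addr] starts from the city-wide per-category log-odds (default_logodds)
# and is overridden only for categories seen more than MIN_CAT_COUNTS times
# (but not exclusively) at that address.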
# In[8]:
features, labels=parse_data(trainDF,logodds,logoddsPA)
# In[9]:
print(features.columns.tolist())
print(len(features.columns))
# In[10]:
# num_feature_list=["Time","Day","Month","Year","DayOfWeek"]
collist=features.columns.tolist()
scaler = preprocessing.StandardScaler()
scaler.fit(features)
features[collist]=scaler.transform(features)
# In[11]:
new_PCA=PCA(n_components=60)
new_PCA.fit(features)
# plt.plot(new_PCA.explained_variance_ratio_)
# plt.yscale('log')
# plt.title("PCA explained ratio of features")
print(new_PCA.explained_variance_ratio_)
# In[12]:
# plt.plot(new_PCA.explained_variance_ratio_.cumsum())
# plt.title("cumsum of PCA explained ratio")
# PCA is interesting, here to play with it more
# In[13]:
# features=new_PCA.transform(features)
# features=pd.DataFrame(features)
# In[22]:
sss = StratifiedShuffleSplit(labels, train_size=0.5)
for train_index, test_index in sss:
features_train,features_test=features.iloc[train_index],features.iloc[test_index]
labels_train,labels_test=labels[train_index],labels[test_index]
features_test.index=range(len(features_test))
features_train.index=range(len(features_train))
labels_train.index=range(len(labels_train))
labels_test.index=range(len(labels_test))
features.index=range(len(features))
labels.index=range(len(labels))
# In[15]:
# def build_and_fit_model(X_train,y_train,X_test=None,y_test=None,hn=32,dp=0.5,layers=1,epochs=1,batches=64,verbose=0):
# input_dim=X_train.shape[1]
# output_dim=len(labels_train.unique())
# Y_train=np_utils.to_categorical(y_train.cat.rename_categories(range(len(y_train.unique()))))
# # print output_dim
# model = Sequential()
# model.add(Dense(input_dim, hn, init='glorot_uniform'))
# model.add(PReLU((hn,)))
# model.add(Dropout(dp))
# for i in range(layers):
# model.add(Dense(hn, hn, init='glorot_uniform'))
# model.add(PReLU((hn,)))
# model.add(BatchNormalization((hn,)))
# model.add(Dropout(dp))
# model.add(Dense(hn, output_dim, init='glorot_uniform'))
# model.add(Activation('softmax'))
# model.compile(loss='categorical_crossentropy', optimizer='adam')
# if X_test is not None:
# Y_test=np_utils.to_categorical(y_test.cat.rename_categories(range(len(y_test.unique()))))
# fitting=model.fit(X_train, Y_train, nb_epoch=epochs, batch_size=batches,verbose=verbose,validation_data=(X_test,Y_test))
# test_score = log_loss(y_test, model.predict_proba(X_test,verbose=0))
# else:
# model.fit(X_train, Y_train, nb_epoch=epochs, batch_size=batches,verbose=verbose)
# fitting=0
# test_score = 0
# return test_score, fitting, model
# In[16]:
len(features.columns)
# In[17]:
# N_EPOCHS=20
# N_HN=128
# N_LAYERS=1
# DP=0.5
# In[18]:
# score, fitting, model = build_and_fit_model(features_train.as_matrix(),labels_train,X_test=features_test.as_matrix(),y_test=labels_test,hn=N_HN,layers=N_LAYERS,epochs=N_EPOCHS,verbose=2,dp=DP)
model = LogisticRegression()
model.fit(features_train,labels_train)
# In[24]:
print("all", log_loss(labels, model.predict_proba(features.as_matrix())))
print("train", log_loss(labels_train, model.predict_proba(features_train.as_matrix())))
print("test", log_loss(labels_test, model.predict_proba(features_test.as_matrix())))
# In[28]:
# plt.plot(fitting.history['val_loss'],label="validation")
# plt.plot(fitting.history['loss'],label="train")
# # plt.xscale('log')
# plt.legend()
# Now train the final model
# In[29]:
# score, fitting, model = build_and_fit_model(features.as_matrix(),labels,hn=N_HN,layers=N_LAYERS,epochs=N_EPOCHS,verbose=2,dp=DP)
model.fit(features,labels)
# In[30]:
print("all", log_loss(labels, model.predict_proba(features.as_matrix())))
print("train", log_loss(labels_train, model.predict_proba(features_train.as_matrix())))
print("test", log_loss(labels_test, model.predict_proba(features_test.as_matrix())))
# In[31]:
testDF=pd.read_csv("../../test.csv")
testDF[["X","Y"]]=xy_scaler.transform(testDF[["X","Y"]])
#set outliers to 0
testDF["X"]=testDF["X"].apply(lambda x: 0 if abs(x)>5 else x)
testDF["Y"]=testDF["Y"].apply(lambda y: 0 if abs(y)>5 else y)
# In[32]:
new_addresses=sorted(testDF["Address"].unique())
new_A_counts=testDF.groupby("Address").size()
only_new=set(new_addresses+addresses)-set(addresses)
only_old=set(new_addresses+addresses)-set(new_addresses)
in_both=set(new_addresses).intersection(addresses)
for addr in only_new:
PA=new_A_counts[addr]/float(len(testDF)+len(trainDF))
logoddsPA[addr]=np.log(PA)-np.log(1.-PA)
logodds[addr]=deepcopy(default_logodds)
logodds[addr].index=range(len(categories))
for addr in in_both:
PA=(A_counts[addr]+new_A_counts[addr])/float(len(testDF)+len(trainDF))
logoddsPA[addr]=np.log(PA)-np.log(1.-PA)
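# Addresses seen only in the test set fall back to the global default_logodds,
# while addresses present in both sets keep their train-set category log-odds
# and only have the address prior logoddsPA recomputed from the combined counts.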
# In[33]:
features_sub, _=parse_data(testDF,logodds,logoddsPA)
# scaler.fit(features_test)
# In[34]:
collist=features_sub.columns.tolist()
print(collist)
# In[35]:
features_sub[collist]=scaler.transform(features_sub[collist])
# In[36]:
predDF=pd.DataFrame(model.predict_proba(features_sub.as_matrix()),columns=sorted(labels.unique()))
# In[37]:
predDF.head()
# In[38]:
import gzip
with gzip.GzipFile('submission.csv.gz',mode='w',compresslevel=9) as gzfile:
predDF.to_csv(gzfile,index_label="Id",na_rep="0") | apache-2.0 |
mwv/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 130 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
ugaliguy/Intro-to-NLP | Assignment3/A.py | 1 | 2989 | from main import replace_accented
from sklearn import svm
from sklearn import neighbors
# don't change the window size
window_size = 10
# A.1
def build_s(data):
'''
Compute the context vector for each lexelt
:param data: dic with the following structure:
{
lexelt: [(instance_id, left_context, head, right_context, sense_id), ...],
...
}
:return: dic s with the following structure:
{
lexelt: [w1,w2,w3, ...],
...
}
'''
s = {}
# implement your code here
return s
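# A minimal sketch of one possible build_s (a hypothetical reference, not the
# graded solution): collect every token that appears within window_size tokens
# of the head word in any training instance of a lexelt.
def build_s_sketch(data):
    s = {}
    for lexelt, instances in data.items():
        words = set()
        for instance_id, left_context, head, right_context, sense_id in instances:
            words.update(left_context.split()[-window_size:])
            words.update(right_context.split()[:window_size])
        s[lexelt] = sorted(words)
    return s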
# A.1
def vectorize(data, s):
'''
:param data: list of instances for a given lexelt with the following structure:
{
[(instance_id, left_context, head, right_context, sense_id), ...]
}
:param s: list of words (features) for a given lexelt: [w1,w2,w3, ...]
:return: vectors: A dictionary with the following structure
{ instance_id: [w_1 count, w_2 count, ...],
...
}
labels: A dictionary with the following structure
{ instance_id : sense_id }
'''
vectors = {}
labels = {}
# implement your code here
return vectors, labels
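# A minimal sketch of one possible vectorize (a hypothetical reference): count
# how often each feature word in s occurs in the window around the head word.
def vectorize_sketch(data, s):
    vectors, labels = {}, {}
    for instance_id, left_context, head, right_context, sense_id in data:
        window = left_context.split()[-window_size:] + right_context.split()[:window_size]
        vectors[instance_id] = [window.count(w) for w in s]
        labels[instance_id] = sense_id
    return vectors, labels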
# A.2
def classify(X_train, X_test, y_train):
'''
Train two classifiers on (X_train, and y_train) then predict X_test labels
:param X_train: A dictionary with the following structure
{ instance_id: [w_1 count, w_2 count, ...],
...
}
:param X_test: A dictionary with the following structure
{ instance_id: [w_1 count, w_2 count, ...],
...
}
:param y_train: A dictionary with the following structure
{ instance_id : sense_id }
:return: svm_results: a list of tuples (instance_id, label) where labels are predicted by LinearSVC
knn_results: a list of tuples (instance_id, label) where labels are predicted by KNeighborsClassifier
'''
svm_results = []
knn_results = []
svm_clf = svm.LinearSVC()
knn_clf = neighbors.KNeighborsClassifier()
# implement your code here
return svm_results, knn_results
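# A minimal sketch of one possible classify (a hypothetical reference): fit both
# classifiers on the training vectors and predict one label per test instance.
def classify_sketch(X_train, X_test, y_train):
    svm_clf = svm.LinearSVC()
    knn_clf = neighbors.KNeighborsClassifier()
    train_ids = sorted(X_train.keys())
    train_vectors = [X_train[i] for i in train_ids]
    train_labels = [y_train[i] for i in train_ids]
    svm_clf.fit(train_vectors, train_labels)
    knn_clf.fit(train_vectors, train_labels)
    test_ids = sorted(X_test.keys())
    test_vectors = [X_test[i] for i in test_ids]
    svm_results = list(zip(test_ids, svm_clf.predict(test_vectors)))
    knn_results = list(zip(test_ids, knn_clf.predict(test_vectors)))
    return svm_results, knn_results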
# A.3, A.4 output
def print_results(results ,output_file):
'''
:param results: A dictionary with key = lexelt and value = a list of tuples (instance_id, label)
:param output_file: file to write output
'''
# implement your code here
# don't forget to remove the accent of characters using main.replace_accented(input_str)
# you should sort results on instance_id before printing
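# A minimal sketch of one possible print_results (a hypothetical reference):
# one "lexelt instance_id sense_id" line per prediction, sorted by instance_id,
# with accents stripped via replace_accented.
def print_results_sketch(results, output_file):
    with open(output_file, 'w') as outfile:
        for lexelt in sorted(results):
            for instance_id, label in sorted(results[lexelt]):
                outfile.write(replace_accented(lexelt) + ' ' +
                              replace_accented(instance_id) + ' ' +
                              str(label) + '\n')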
# run part A
def run(train, test, language, knn_file, svm_file):
s = build_s(train)
svm_results = {}
knn_results = {}
for lexelt in s:
X_train, y_train = vectorize(train[lexelt], s[lexelt])
X_test, _ = vectorize(test[lexelt], s[lexelt])
svm_results[lexelt], knn_results[lexelt] = classify(X_train, X_test, y_train)
print_results(svm_results, svm_file)
print_results(knn_results, knn_file)
| mit |
posterior/treecat | treecat/e2e_test.py | 1 | 3047 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from warnings import warn
import matplotlib
import pytest
from treecat.format import guess_schema
from treecat.format import load_data
from treecat.format import load_schema
from treecat.serving import serve_model
from treecat.tables import TY_MULTINOMIAL
from treecat.tables import Table
from treecat.testutil import TESTDATA
from treecat.testutil import TINY_CONFIG
from treecat.testutil import tempdir
from treecat.training import train_ensemble
from treecat.training import train_model
# The Agg backend is required for headless testing.
matplotlib.use('Agg')
from treecat.plotting import plot_circular # noqa: E402 isort:skip
@pytest.mark.parametrize('model_type', ['single', 'ensemble'])
def test_e2e(model_type):
with tempdir() as dirname:
data_csv = os.path.join(TESTDATA, 'tiny_data.csv')
config = TINY_CONFIG.copy()
print('Guess schema.')
types_csv = os.path.join(dirname, 'types.csv')
values_csv = os.path.join(dirname, 'values.csv')
guess_schema(data_csv, types_csv, values_csv)
print('Load schema')
groups_csv = os.path.join(TESTDATA, 'tiny_groups.csv')
schema = load_schema(types_csv, values_csv, groups_csv)
ragged_index = schema['ragged_index']
tree_prior = schema['tree_prior']
print('Load data')
data = load_data(schema, data_csv)
feature_types = [TY_MULTINOMIAL] * len(schema['feature_names'])
table = Table(feature_types, ragged_index, data)
dataset = {
'schema': schema,
'table': table,
}
print('Train model')
if model_type == 'single':
model = train_model(table, tree_prior, config)
elif model_type == 'ensemble':
model = train_ensemble(table, tree_prior, config)
else:
raise ValueError(model_type)
print('Serve model')
server = serve_model(dataset, model)
print('Query model')
evidence = {'genre': 'drama'}
server.logprob([evidence])
samples = server.sample(100)
server.logprob(samples)
samples = server.sample(100, evidence)
server.logprob(samples)
try:
median = server.median([evidence])
server.logprob(median)
except NotImplementedError:
warn('{} median not implemented'.format(model_type))
pass
try:
mode = server.mode([evidence])
server.logprob(mode)
except NotImplementedError:
warn('{} mode not implemented'.format(model_type))
pass
print('Examine latent structure')
server.feature_density()
server.observed_perplexity()
server.latent_perplexity()
server.latent_correlation()
server.estimate_tree()
server.sample_tree(10)
print('Plotting latent structure')
plot_circular(server)
| apache-2.0 |
laddng/LiPlate | modules/Plate.py | 1 | 5432 | import cv2;
import numpy as np;
import logging;
import pytesseract as tes;
from PIL import Image;
from modules.TrainingCharacter import *;
from matplotlib import pyplot as plt;
from copy import deepcopy, copy;
from logging.config import fileConfig;
# logger setup
fileConfig("logging_config.ini");
logger = logging.getLogger();
class Plate:
""" Class for the license plates """
def __init__(self, image): ### Plate Class Vars ###
self.original_image = image; # original image of analysis
self.plate_located_image = deepcopy(image); # original image with plate hilighted
self.plate_image = None; # license plate cropped
self.plate_image_char = None; # license plate cropped, chars outlined
self.gray_image = None; # original image - grayscale for analysis
self.plate_number = ""; # plate number
self.roi = []; # regions of interest for plates
self.plate_characters = []; # cropped images of characters on plate
logger.info("New plate created.");
""" Converts original image to grayscale for analysis """
def grayImage(self, image):
logger.info("Image converted to grayscale");
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);
""" Algorithm to find plate and read characters """
def plateSearch(self, characters_array):
self.findContour();
self.cropPlate();
if self.plate_image is not None:
self.readPlateNumber(characters_array);
self.showResults();
return True;
""" Searches for a contour that looks like a license plate
in the image of a car """
def findContour(self):
self.gray_image = self.grayImage(deepcopy(self.original_image));
self.gray_image = cv2.medianBlur(self.gray_image, 5);
self.gray_image = cv2.adaptiveThreshold(self.gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 43,2);
_,contours,_ = cv2.findContours(self.gray_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
w,h,x,y = 0,0,0,0;
for contour in contours:
area = cv2.contourArea(contour);
# rough range of areas of a license plate
if area > 6000 and area < 40000:
[x,y,w,h] = cv2.boundingRect(contour);
# rough dimensions of a license plate
if w > 100 and w < 200 and h > 60 and h < 100:
self.roi.append([x,y,w,h]);
cv2.rectangle(self.plate_located_image, (x,y), (x+w, y+h), (0,255,0), 10);
logger.info("%s potential plates found.", str(len(self.roi)));
return True;
""" If a license plate contour has been found, crop
out the contour and create a new image """
def cropPlate(self):
if len(self.roi) > 1:
[x,y,w,h] = self.roi[0];
self.plate_image = self.original_image[y:y+h,x:x+w];
self.plate_image_char = deepcopy(self.plate_image);
return True;
""" Subalgorithm to read the license plate number using the
cropped image of a license plate """
def readPlateNumber(self, characters_array):
self.findCharacterContour();
self.tesseractCharacter();
return True;
""" Crops individual characters out of a plate image
and converts it to grayscale for comparison """
def cropCharacter(self, dimensions):
[x,y,w,h] = dimensions;
character = deepcopy(self.plate_image);
character = deepcopy(character[y:y+h,x:x+w]);
return character;
""" Finds contours in the cropped image of a license plate
that fit the dimension range of a letter or number """
def findCharacterContour(self):
gray_plate = self.grayImage(deepcopy(self.plate_image));
gray_plate = cv2.GaussianBlur(gray_plate, (3,3), 0);
_,threshold = cv2.threshold(gray_plate, 140, 255, 0);
_,contours,_ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
w,h,x,y = 0,0,0,0;
logger.info("%s contours found.", str(len(contours)));
for contour in contours:
area = cv2.contourArea(contour);
# rough range of areas of a plate number
if area > 120 and area < 2000:
[x,y,w,h] = cv2.boundingRect(contour);
# rough dimensions of a character
if h > 20 and h < 90 and w > 10 and w < 50:
character = self.cropCharacter([x,y,w,h]);
self.plate_characters.append([x, character]);
cv2.rectangle(self.plate_image_char, (x,y), (x+w, y+h), (0,0,255), 1);
logger.info("%s plate characters found", str(len(self.plate_characters)));
return True;
""" Tesseract: reads the character using the Tesseract libary """
def tesseractCharacter(self):
self.plate_characters = sorted(self.plate_characters, key=lambda x: x[0]); # sort contours left to right
for character in self.plate_characters[:8]: # only first 8 contours
char_image = Image.fromarray(character[1]);
char = tes.image_to_string(char_image, config='-psm 10');
self.plate_number += char.upper();
return True;
""" Subplot generator for images """
def plot(self, figure, subplot, image, title):
figure.subplot(subplot);
figure.imshow(image);
figure.xlabel(title);
figure.xticks([]);
figure.yticks([]);
return True;
""" Show our results """
def showResults(self):
plt.figure(self.plate_number);
self.plot(plt, 321, self.original_image, "Original image");
self.plot(plt, 322, self.gray_image, "Threshold image");
self.plot(plt, 323, self.plate_located_image, "Plate located");
if self.plate_image is not None:
self.plot(plt, 324, self.plate_image, "License plate");
self.plot(plt, 325, self.plate_image_char, "Characters outlined");
plt.subplot(326);plt.text(0,0,self.plate_number, fontsize=30);plt.xticks([]);plt.yticks([]);
plt.tight_layout();
plt.show();
return True;
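# A hypothetical usage sketch (not part of the original module): read a car
# image and run the full plate-search pipeline; the characters_array argument
# is unused by the Tesseract-based path above, so an empty list is passed.
if __name__ == "__main__":
    car_image = cv2.imread("car.jpg");
    if car_image is None:
        logger.error("Could not read car.jpg");
    else:
        Plate(car_image).plateSearch([]);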
| mit |
h2educ/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
michaelaye/scikit-image | doc/examples/plot_ssim.py | 15 | 2238 | """
===========================
Structural similarity index
===========================
When comparing images, the mean squared error (MSE)--while simple to
implement--is not highly indicative of perceived similarity. Structural
similarity aims to address this shortcoming by taking texture into account
[1]_, [2]_.
The example shows two modifications of the input image, each with the same MSE,
but with very different mean structural similarity indices.
.. [1] Zhou Wang; Bovik, A.C.; ,"Mean squared error: Love it or leave it? A new
look at Signal Fidelity Measures," Signal Processing Magazine, IEEE,
vol. 26, no. 1, pp. 98-117, Jan. 2009.
.. [2] Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality
assessment: From error visibility to structural similarity," IEEE
Transactions on Image Processing, vol. 13, no. 4, pp. 600-612,
Apr. 2004.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.measure import structural_similarity as ssim
matplotlib.rcParams['font.size'] = 9
img = img_as_float(data.camera())
rows, cols = img.shape
noise = np.ones_like(img) * 0.2 * (img.max() - img.min())
noise[np.random.random(size=noise.shape) > 0.5] *= -1
def mse(x, y):
    # mean squared error between the two images
    return ((x - y) ** 2).mean()
img_noise = img + noise
img_const = img + abs(noise)
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(8, 4))
mse_none = mse(img, img)
ssim_none = ssim(img, img, dynamic_range=img.max() - img.min())
mse_noise = mse(img, img_noise)
ssim_noise = ssim(img, img_noise, dynamic_range=img_noise.max() - img_noise.min())
mse_const = mse(img, img_const)
ssim_const = ssim(img, img_const, dynamic_range=img_const.max() - img_const.min())
label = 'MSE: %.2f, SSIM: %.2f'
ax0.imshow(img, cmap=plt.cm.gray, vmin=0, vmax=1)
ax0.set_xlabel(label % (mse_none, ssim_none))
ax0.set_title('Original image')
ax1.imshow(img_noise, cmap=plt.cm.gray, vmin=0, vmax=1)
ax1.set_xlabel(label % (mse_noise, ssim_noise))
ax1.set_title('Image with noise')
ax2.imshow(img_const, cmap=plt.cm.gray, vmin=0, vmax=1)
ax2.set_xlabel(label % (mse_const, ssim_const))
ax2.set_title('Image plus constant')
plt.show()
| bsd-3-clause |
terkkila/scikit-learn | sklearn/tree/tests/test_export.py | 76 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| bsd-3-clause |
ucloud/uai-sdk | examples/mxnet/insightface/train/code/train_softmax_dist.py | 1 | 26097 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import random
import logging
import pickle
import numpy as np
from image_iter import FaceImageIter
from image_iter import FaceImageIterList
import mxnet as mx
from mxnet import ndarray as nd
import argparse
import mxnet.optimizer as optimizer
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
import face_image
sys.path.append(os.path.join(os.path.dirname(__file__), 'eval'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'symbols'))
import fresnet
import finception_resnet_v2
import fmobilenet
import fmobilenetv2
import fmobilefacenet
import fxception
import fdensenet
import fdpn
import fnasnet
import spherenet
import verification
import sklearn
#sys.path.append(os.path.join(os.path.dirname(__file__), 'losses'))
#import center_loss
logger = logging.getLogger()
logger.setLevel(logging.INFO)
args = None
class AccMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(AccMetric, self).__init__(
'acc', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
self.count = 0
def update(self, labels, preds):
self.count+=1
label = labels[0]
pred_label = preds[1]
if pred_label.shape != label.shape:
pred_label = mx.ndarray.argmax(pred_label, axis=self.axis)
pred_label = pred_label.asnumpy().astype('int32').flatten()
label = label.asnumpy()
if label.ndim==2:
label = label[:,0]
label = label.astype('int32').flatten()
assert label.shape==pred_label.shape
self.sum_metric += (pred_label.flat == label.flat).sum()
self.num_inst += len(pred_label.flat)
class LossValueMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(LossValueMetric, self).__init__(
'lossvalue', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
def update(self, labels, preds):
loss = preds[-1].asnumpy()[0]
self.sum_metric += loss
self.num_inst += 1.0
gt_label = preds[-2].asnumpy()
#print(gt_label)
def parse_args():
parser = argparse.ArgumentParser(description='Train face network')
# UAI Related
parser.add_argument('--work_dir', type=str, default="/data", help='Default work path')
parser.add_argument('--output_dir', type=str, default="/data/output", help="Default output path")
parser.add_argument('--log_dir', type=str, default="/data/output", help="Default log path")
parser.add_argument('--num_gpus', type=int, help="Num of avaliable gpus")
parser.add_argument('--data_dir', default='/data/data/', help='training set directory')
# Dist Train
parser.add_argument('--kv-store', type=str, default='device', help='key-value store type')
parser.add_argument('--gc-type', type=str, default='none',
help='type of gradient compression to use, takes `2bit` or `none` for now')
parser.add_argument('--optimizer', type=str, default='sgd', help='the optimizer type')
# general
parser.add_argument('--prefix', default='../model/model', help='directory to save model.')
parser.add_argument('--pretrained', default='', help='pretrained model to load')
parser.add_argument('--ckpt', type=int, default=1, help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
parser.add_argument('--loss-type', type=int, default=4, help='loss type')
parser.add_argument('--verbose', type=int, default=2000, help='do verification testing every verbose batches')
parser.add_argument('--max-steps', type=int, default=0, help='max training batches')
parser.add_argument('--end-epoch', type=int, default=100000, help='training epoch size.')
parser.add_argument('--network', default='r50', help='specify network')
parser.add_argument('--image-size', default='112,112', help='specify input image height and width')
parser.add_argument('--version-se', type=int, default=0, help='whether to use se in network')
parser.add_argument('--version-input', type=int, default=1, help='network input config')
parser.add_argument('--version-output', type=str, default='E', help='network embedding output config')
parser.add_argument('--version-unit', type=int, default=3, help='resnet unit config')
parser.add_argument('--version-multiplier', type=float, default=1.0, help='filters multiplier')
parser.add_argument('--version-act', type=str, default='prelu', help='network activation config')
parser.add_argument('--use-deformable', type=int, default=0, help='use deformable cnn in network')
parser.add_argument('--lr', type=float, default=0.1, help='start learning rate')
parser.add_argument('--lr-steps', type=str, default='', help='steps of lr changing')
parser.add_argument('--wd', type=float, default=0.0005, help='weight decay')
parser.add_argument('--fc7-wd-mult', type=float, default=1.0, help='weight decay mult for fc7')
parser.add_argument('--fc7-lr-mult', type=float, default=1.0, help='lr mult for fc7')
parser.add_argument("--fc7-no-bias", default=False, action="store_true" , help="fc7 no bias flag")
parser.add_argument('--bn-mom', type=float, default=0.9, help='bn mom')
parser.add_argument('--mom', type=float, default=0.9, help='momentum')
parser.add_argument('--emb-size', type=int, default=512, help='embedding length')
parser.add_argument('--per-batch-size', type=int, default=128, help='batch size in each context')
parser.add_argument('--margin-m', type=float, default=0.5, help='margin for loss')
parser.add_argument('--margin-s', type=float, default=64.0, help='scale for feature')
parser.add_argument('--margin-a', type=float, default=1.0, help='')
parser.add_argument('--margin-b', type=float, default=0.0, help='')
parser.add_argument('--easy-margin', type=int, default=0, help='')
parser.add_argument('--margin', type=int, default=4, help='margin for sphere')
parser.add_argument('--beta', type=float, default=1000., help='param for sphere')
parser.add_argument('--beta-min', type=float, default=5., help='param for sphere')
parser.add_argument('--power', type=float, default=1.0, help='param for sphere')
parser.add_argument('--scale', type=float, default=0.9993, help='param for sphere')
parser.add_argument('--rand-mirror', type=int, default=1, help='if do random mirror in training')
parser.add_argument('--cutoff', type=int, default=0, help='cut off aug')
parser.add_argument('--color', type=int, default=0, help='color jittering aug')
parser.add_argument('--images-filter', type=int, default=0, help='minimum images per identity filter')
parser.add_argument('--target', type=str, default='lfw,cfp_fp,agedb_30', help='verification targets')
parser.add_argument('--ce-loss', default=False, action='store_true', help='if output ce loss')
args = parser.parse_args()
return args
def get_symbol(args, arg_params, aux_params):
data_shape = (args.image_channel,args.image_h,args.image_w)
image_shape = ",".join([str(x) for x in data_shape])
margin_symbols = []
if args.network[0]=='d':
embedding = fdensenet.get_symbol(args.emb_size, args.num_layers,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
elif args.network[0]=='m':
print('init mobilenet', args.num_layers)
if args.num_layers==1:
embedding = fmobilenet.get_symbol(args.emb_size,
version_input=args.version_input,
version_output=args.version_output,
version_multiplier = args.version_multiplier)
else:
embedding = fmobilenetv2.get_symbol(args.emb_size)
elif args.network[0]=='i':
print('init inception-resnet-v2', args.num_layers)
embedding = finception_resnet_v2.get_symbol(args.emb_size,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
elif args.network[0]=='x':
print('init xception', args.num_layers)
embedding = fxception.get_symbol(args.emb_size,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
elif args.network[0]=='p':
print('init dpn', args.num_layers)
embedding = fdpn.get_symbol(args.emb_size, args.num_layers,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
elif args.network[0]=='n':
print('init nasnet', args.num_layers)
embedding = fnasnet.get_symbol(args.emb_size)
elif args.network[0]=='s':
print('init spherenet', args.num_layers)
embedding = spherenet.get_symbol(args.emb_size, args.num_layers)
elif args.network[0]=='y':
print('init mobilefacenet', args.num_layers)
embedding = fmobilefacenet.get_symbol(args.emb_size, bn_mom = args.bn_mom, version_output=args.version_output)
else:
print('init resnet', args.num_layers)
embedding = fresnet.get_symbol(args.emb_size, args.num_layers,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit,
version_act=args.version_act)
all_label = mx.symbol.Variable('softmax_label')
gt_label = all_label
extra_loss = None
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=args.fc7_lr_mult, wd_mult=args.fc7_wd_mult)
if args.loss_type==0: #softmax
if args.fc7_no_bias:
fc7 = mx.sym.FullyConnected(data=embedding, weight = _weight, no_bias = True, num_hidden=args.num_classes, name='fc7')
else:
_bias = mx.symbol.Variable('fc7_bias', lr_mult=2.0, wd_mult=0.0)
fc7 = mx.sym.FullyConnected(data=embedding, weight = _weight, bias = _bias, num_hidden=args.num_classes, name='fc7')
elif args.loss_type==1: #sphere not suitable for dist training, exit
sys.exit(-1)
'''
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
fc7 = mx.sym.LSoftmax(data=embedding, label=gt_label, num_hidden=args.num_classes,
weight = _weight,
beta=args.beta, margin=args.margin, scale=args.scale,
beta_min=args.beta_min, verbose=1000, name='fc7')
'''
elif args.loss_type==2: # CosineFace
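    # CosineFace / AM-Softmax: logits are s*cos(theta); the additive margin is
    # applied by subtracting s*m from the ground-truth class logit only, via the
    # one-hot mask built below.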
s = args.margin_s
m = args.margin_m
assert(s>0.0)
assert(m>0.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*s
fc7 = mx.sym.FullyConnected(data=nembedding, weight = _weight, no_bias = True, num_hidden=args.num_classes, name='fc7')
s_m = s*m
gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = s_m, off_value = 0.0)
fc7 = fc7-gt_one_hot
elif args.loss_type==4: # ArcFace
s = args.margin_s
m = args.margin_m
assert s>0.0
assert m>=0.0
assert m<(math.pi/2)
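    # ArcFace additive angular margin: the ground-truth logit s*cos(theta) becomes
    # s*cos(theta + m) = s*(cos(theta)*cos(m) - sin(theta)*sin(m)).
    # cond decides where the margin is valid: with easy_margin it is applied only
    # when cos(theta) > 0; otherwise only when theta < pi - m, falling back to
    # zy - s*m*sin(pi - m) (the `mm` term below) to keep the target logit monotonic.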
_weight = mx.symbol.L2Normalization(_weight, mode='instance') # L2Norm(w) ||w|| = 1
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*s # L2Norm(embedding) ||embedding|| = 1
fc7 = mx.sym.FullyConnected(data=nembedding, weight = _weight, no_bias = True, num_hidden=args.num_classes, name='fc7')
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy/s
cos_m = math.cos(m)
sin_m = math.sin(m)
mm = math.sin(math.pi-m)*m
#threshold = 0.0
threshold = math.cos(math.pi-m)
if args.easy_margin:
cond = mx.symbol.Activation(data=cos_t, act_type='relu')
else:
cond_v = cos_t - threshold
cond = mx.symbol.Activation(data=cond_v, act_type='relu')
body = cos_t*cos_t
body = 1.0-body
sin_t = mx.sym.sqrt(body)
new_zy = cos_t*cos_m
b = sin_t*sin_m
new_zy = new_zy - b
new_zy = new_zy*s
if args.easy_margin:
zy_keep = zy
else:
zy_keep = zy - s*mm
new_zy = mx.sym.where(cond, new_zy, zy_keep)
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = 1.0, off_value = 0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7+body
elif args.loss_type==5: # Combined Margin
s = args.margin_s
m = args.margin_m
assert s>0.0
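    # Combined margin: the ground-truth logit becomes s*(cos(a*theta + m) - b),
    # which reduces to SphereFace (a), ArcFace (m) or CosFace (b) when the other
    # margins keep their identity values (a=1, m=0, b=0).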
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*s
fc7 = mx.sym.FullyConnected(data=nembedding, weight = _weight, no_bias = True, num_hidden=args.num_classes, name='fc7')
if args.margin_a!=1.0 or args.margin_m!=0.0 or args.margin_b!=0.0:
if args.margin_a==1.0 and args.margin_m==0.0:
s_m = s*args.margin_b
gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = s_m, off_value = 0.0)
fc7 = fc7-gt_one_hot
else:
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy/s
t = mx.sym.arccos(cos_t)
if args.margin_a!=1.0:
t = t*args.margin_a
if args.margin_m>0.0:
t = t+args.margin_m
body = mx.sym.cos(t)
if args.margin_b>0.0:
body = body - args.margin_b
new_zy = body*s
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = 1.0, off_value = 0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7+body
elif args.loss_type==6:
s = args.margin_s
m = args.margin_m
assert s>0.0
assert args.margin_b>0.0
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*s
fc7 = mx.sym.FullyConnected(data=nembedding, weight = _weight, no_bias = True, num_hidden=args.num_classes, name='fc7')
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy/s
t = mx.sym.arccos(cos_t)
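    # Intra-class loss: penalize the mean angle (normalized by pi) between each
    # embedding and its own class weight vector, weighted by margin_b.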
intra_loss = t/np.pi
intra_loss = mx.sym.mean(intra_loss)
#intra_loss = mx.sym.exp(cos_t*-1.0)
intra_loss = mx.sym.MakeLoss(intra_loss, name='intra_loss', grad_scale = args.margin_b)
if m>0.0:
t = t+m
body = mx.sym.cos(t)
new_zy = body*s
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = 1.0, off_value = 0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7+body
elif args.loss_type==7:
s = args.margin_s
m = args.margin_m
assert s>0.0
assert args.margin_b>0.0
assert args.margin_a>0.0
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*s
fc7 = mx.sym.FullyConnected(data=nembedding, weight = _weight, no_bias = True, num_hidden=args.num_classes, name='fc7')
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy/s
t = mx.sym.arccos(cos_t)
#counter_weight = mx.sym.take(_weight, gt_label, axis=1)
#counter_cos = mx.sym.dot(counter_weight, _weight, transpose_a=True)
counter_weight = mx.sym.take(_weight, gt_label, axis=0)
counter_cos = mx.sym.dot(counter_weight, _weight, transpose_b=True)
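    # Inter-class loss: counter_cos holds the cosine similarities between each
    # sample's class weight vector and every class weight; minimizing their mean
    # (scaled by margin_b) pushes the class centres apart.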
#counter_cos = mx.sym.minimum(counter_cos, 1.0)
#counter_angle = mx.sym.arccos(counter_cos)
#counter_angle = counter_angle * -1.0
#counter_angle = counter_angle/np.pi #[0,1]
#inter_loss = mx.sym.exp(counter_angle)
#counter_cos = mx.sym.dot(_weight, _weight, transpose_b=True)
#counter_cos = mx.sym.minimum(counter_cos, 1.0)
#counter_angle = mx.sym.arccos(counter_cos)
#counter_angle = mx.sym.sort(counter_angle, axis=1)
#counter_angle = mx.sym.slice_axis(counter_angle, axis=1, begin=0,end=int(args.margin_a))
#inter_loss = counter_angle*-1.0 # [-1,0]
#inter_loss = inter_loss+1.0 # [0,1]
inter_loss = counter_cos
inter_loss = mx.sym.mean(inter_loss)
inter_loss = mx.sym.MakeLoss(inter_loss, name='inter_loss', grad_scale = args.margin_b)
if m>0.0:
t = t+m
body = mx.sym.cos(t)
new_zy = body*s
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = 1.0, off_value = 0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7+body
out_list = [mx.symbol.BlockGrad(embedding)]
softmax = mx.symbol.SoftmaxOutput(data=fc7, label = gt_label, name='softmax', normalization='valid')
out_list.append(softmax)
if args.loss_type==6:
out_list.append(intra_loss)
if args.loss_type==7:
out_list.append(inter_loss)
#out_list.append(mx.sym.BlockGrad(counter_weight))
#out_list.append(intra_loss)
if args.ce_loss:
#ce_loss = mx.symbol.softmax_cross_entropy(data=fc7, label = gt_label, name='ce_loss')/args.per_batch_size
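    # Monitoring-only cross-entropy: sum of -log(softmax(fc7)) at the ground-truth
    # class, divided by per_batch_size, wrapped in BlockGrad so it adds no gradient.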
body = mx.symbol.SoftmaxActivation(data=fc7)
body = mx.symbol.log(body)
_label = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = -1.0, off_value = 0.0)
body = body*_label
ce_loss = mx.symbol.sum(body)/args.per_batch_size
out_list.append(mx.symbol.BlockGrad(ce_loss))
out = mx.symbol.Group(out_list)
return (out, arg_params, aux_params)
# lr scheduler for dist training
def _get_lr_scheduler(args, kv, begin_epoch, num_samples):
lr = args.lr
if len(args.lr_steps)==0:
lr_steps = [40000, 60000, 80000]
if args.loss_type>=1 and args.loss_type<=7:
lr_steps = [100000, 140000, 160000]
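      # Scale the default step schedule by 512/(global batch size), i.e. treat
      # 512 samples per update as the reference configuration.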
p = 512.0/(args.batch_size * kv.num_workers)
for l in xrange(len(lr_steps)):
lr_steps[l] = int(lr_steps[l]*p)
else:
lr_steps = [int(x) for x in args.lr_steps.split(',')]
print('lr_steps', lr_steps)
begin_step = begin_epoch * num_samples
for s in lr_steps:
if begin_step >= s:
lr *= 0.1
if lr != args.lr:
logging.info('Adjust learning rate to %e for step %d' %(lr, begin_step))
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=lr_steps, factor=0.1))
# save the model at the end of each epoch
def _save_model(args, rank=0):
if args.prefix is None:
return None
#Add UAI output_dir path
model_prefix = args.prefix
dst_dir = os.path.dirname(model_prefix)
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
return mx.callback.do_checkpoint(model_prefix if rank == 0 else "%s-%d" % (model_prefix, rank))
def train_net(args):
# Set up kvstore
kv = mx.kvstore.create(args.kv_store)
if args.gc_type != 'none':
kv.set_gradient_compression({'type': args.gc_type,
'threshold': args.gc_threshold})
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
  # Get ctx according to num_gpus; gpu ids start from 0
ctx = []
  ctx = [mx.cpu()] if args.num_gpus is None or args.num_gpus == 0 else [
mx.gpu(i) for i in range(args.num_gpus)]
  # model prefix; on the UAI Platform it should be /data/output/xxx
prefix = args.prefix
prefix_dir = os.path.dirname(prefix)
if not os.path.exists(prefix_dir):
os.makedirs(prefix_dir)
end_epoch = args.end_epoch
args.ctx_num = len(ctx)
args.num_layers = int(args.network[1:])
print('num_layers', args.num_layers)
if args.per_batch_size==0:
args.per_batch_size = 128
args.batch_size = args.per_batch_size*args.ctx_num
args.rescale_threshold = 0
args.image_channel = 3
data_dir_list = args.data_dir.split(',')
assert len(data_dir_list)==1
data_dir = data_dir_list[0]
path_imgrec = None
path_imglist = None
prop = face_image.load_property(data_dir)
args.num_classes = prop.num_classes
#image_size = prop.image_size
image_size = [int(x) for x in args.image_size.split(',')]
assert len(image_size)==2
assert image_size[0]==image_size[1]
args.image_h = image_size[0]
args.image_w = image_size[1]
print('image_size', image_size)
assert(args.num_classes>0)
print('num_classes', args.num_classes)
path_imgrec = os.path.join(data_dir, "train.rec")
path_imglist = os.path.join(data_dir, "train.lst")
num_samples = 0
for line in open(path_imglist).xreadlines():
num_samples += 1
print('Called with argument:', args)
data_shape = (args.image_channel,image_size[0],image_size[1])
mean = None
begin_epoch = 0
base_lr = args.lr
base_wd = args.wd
base_mom = args.mom
if len(args.pretrained)==0:
arg_params = None
aux_params = None
sym, arg_params, aux_params = get_symbol(args, arg_params, aux_params)
if args.network[0]=='s':
data_shape_dict = {'data' : (args.per_batch_size,)+data_shape}
spherenet.init_weights(sym, data_shape_dict, args.num_layers)
else:
    # Note: the model is saved at each epoch, not every N steps as in train_softmax.py.
    # args.pretrained should be given as 'prefix,epoch'
vec = args.pretrained.split(',')
print('loading', vec)
model_prefix = vec[0]
if kv.rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, kv.rank)):
model_prefix += "-%d" % (kv.rank)
logging.info('Loaded model %s_%d.params', model_prefix, int(vec[1]))
_, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, int(vec[1]))
begin_epoch = int(vec[1])
sym, arg_params, aux_params = get_symbol(args, arg_params, aux_params)
model = mx.mod.Module(
context = ctx,
symbol = sym,
)
val_dataiter = None
train_dataiter = FaceImageIter(
batch_size = args.batch_size,
data_shape = data_shape,
path_imgrec = path_imgrec,
shuffle = True,
rand_mirror = args.rand_mirror,
mean = mean,
cutoff = args.cutoff,
color_jittering = args.color,
images_filter = args.images_filter,
)
metric1 = AccMetric()
eval_metrics = [mx.metric.create(metric1)]
if args.ce_loss:
metric2 = LossValueMetric()
eval_metrics.append( mx.metric.create(metric2) )
if args.network[0]=='r' or args.network[0]=='y':
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="out", magnitude=2) #resnet style
elif args.network[0]=='i' or args.network[0]=='x':
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2) #inception
else:
initializer = mx.init.Xavier(rnd_type='uniform', factor_type="in", magnitude=2)
#initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="out", magnitude=2) #resnet style
som = 20
_cb = mx.callback.Speedometer(args.batch_size, som)
ver_list = []
ver_name_list = []
for name in args.target.split(','):
path = os.path.join(data_dir,name+".bin")
if os.path.exists(path):
data_set = verification.load_bin(path, image_size)
ver_list.append(data_set)
ver_name_list.append(name)
print('ver', name)
def ver_test(nbatch):
results = []
for i in xrange(len(ver_list)):
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(ver_list[i], model, args.batch_size, 10, None, None)
print('[%s][%d]XNorm: %f' % (ver_name_list[i], nbatch, xnorm))
#print('[%s][%d]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc1, std1))
print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc2, std2))
results.append(acc2)
return results
highest_acc = [0.0, 0.0] #lfw and target
#for i in xrange(len(ver_list)):
# highest_acc.append(0.0)
def _batch_callback(param):
#global global_step
mbatch = param.nbatch
_cb(param)
if mbatch%1000==0:
print('lr-batch-epoch:',param.nbatch,param.epoch)
if mbatch>=0 and mbatch%args.verbose==0:
acc_list = ver_test(mbatch)
is_highest = False
if len(acc_list)>0:
score = sum(acc_list)
if acc_list[-1]>=highest_acc[-1]:
if acc_list[-1]>highest_acc[-1]:
is_highest = True
else:
if score>=highest_acc[0]:
is_highest = True
highest_acc[0] = score
highest_acc[-1] = acc_list[-1]
#if lfw_score>=0.99:
# do_save = True
print('[%d]Accuracy-Highest: %1.5f'%(mbatch, highest_acc[-1]))
# save model
checkpoint = _save_model(args, kv.rank)
epoch_cb = checkpoint
rescale = 1.0/args.ctx_num
lr, lr_scheduler = _get_lr_scheduler(args, kv, begin_epoch, num_samples)
# learning rate
optimizer_params = {
'learning_rate': lr,
'wd' : args.wd,
'lr_scheduler': lr_scheduler,
'multi_precision': True,
'rescale_grad': rescale}
# Only a limited number of optimizers have 'momentum' property
has_momentum = {'sgd', 'dcasgd', 'nag'}
if args.optimizer in has_momentum:
optimizer_params['momentum'] = args.mom
train_dataiter = mx.io.PrefetchingIter(train_dataiter)
print('Start training')
model.fit(train_dataiter,
begin_epoch = begin_epoch,
num_epoch = end_epoch,
eval_data = val_dataiter,
eval_metric = eval_metrics,
kvstore = kv,
optimizer = args.optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
arg_params = arg_params,
aux_params = aux_params,
allow_missing = True,
batch_end_callback = _batch_callback,
epoch_end_callback = epoch_cb )
def main():
#time.sleep(3600*6.5)
os.environ['MXNET_CPU_WORKER_NTHREADS'] = "24"
global args
args = parse_args()
train_net(args)
if __name__ == '__main__':
main()
| apache-2.0 |
arjoly/scikit-learn | sklearn/linear_model/ridge.py | 6 | 46528 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
        # Only one penalty; we can solve the multi-target problem in one go.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
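    # Ridge solution via the SVD X = U * diag(s) * Vt: for each target/alpha,
    #   coef = Vt.T . diag(s / (s**2 + alpha)) . U.T . y
    # Singular values below 1e-15 are treated as zero (same cutoff as scipy.linalg.pinv).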
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
    The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model.center_data before your regression.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
        The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
start = {'coef': np.zeros(n_features + int(return_intercept))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
start)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_mean
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
    The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
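        # With K = Q diag(v) Q^T we have G = (K + alpha*Id)^-1 = Q diag(1/(v+alpha)) Q^T,
        # so c = G y and c / diag(G) are the leave-one-out errors (see the class
        # Notes); their squares are returned as per-sample squared errors.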
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
    :class:`StratifiedKFold` is used, else, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
        'auto' : use svd if n_samples > n_features and X is dense,
            otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
        'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
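    Examples
    --------
    >>> from sklearn.linear_model import RidgeCV
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)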
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
desihub/desisim | py/desisim/scripts/quickgalaxies.py | 1 | 13799 | """
desisim.scripts.quickgalaxies
=============================
"""
from __future__ import absolute_import, division, print_function
import healpy as hp
import numpy as np
import os
from datetime import datetime
from abc import abstractmethod, ABCMeta
from argparse import Action, ArgumentParser
from astropy.table import Table, vstack
from desisim.templates import BGS
from desisim.scripts.quickspectra import sim_spectra
from desitarget.mock.mockmaker import BGSMaker
from desitarget.cuts import isBGS_colors
from desiutil.log import get_logger, DEBUG
from yaml import load
import matplotlib.pyplot as plt
class SetDefaultFromFile(Action, metaclass=ABCMeta):
"""Abstract interface class to set command-line arguments from a file."""
def __call__(self, parser, namespace, values, option_string=None):
config = self._get_config_from_file(values)
for key, value in config.items():
setattr(namespace, key, value)
@abstractmethod
def _get_config_from_file(self, filename):
raise NotImplementedError
class SetDefaultFromYAMLFile(SetDefaultFromFile):
"""Concrete class that sets command-line arguments from a YAML file."""
def _get_config_from_file(self, filename):
"""Implementation of configuration reader.
Parameters
----------
filename : string
Name of configuration file to read.
Returns
-------
config : dictionary
Configuration dictionary.
"""
with open(filename, 'r') as f:
config = load(f)
return config
def _get_healpixels_in_footprint(nside=64):
"""Obtain a list of HEALPix pixels in the DESI footprint.
Parameters
----------
nside : int
HEALPix nside parameter (in form nside=2**k, k=[1,2,3,...]).
Returns
-------
healpixels : ndarray
List of HEALPix pixels within the DESI footprint.
"""
from desimodel import footprint
from desimodel.io import load_tiles
# Load DESI tiles.
tile_tab = load_tiles()
npix = hp.nside2npix(nside)
pix_ids = np.arange(npix)
ra, dec = hp.pix2ang(nside, pix_ids, lonlat=True)
# Get a list of pixel IDs inside the DESI footprint.
in_desi = footprint.is_point_in_desi(tile_tab, ra, dec)
healpixels = pix_ids[in_desi]
return healpixels
def _default_wave(wavemin=None, wavemax=None, dw=0.2):
"""Generate a default wavelength vector for the output spectra."""
from desimodel.io import load_throughput
if wavemin is None:
wavemin = load_throughput('b').wavemin - 10.0
if wavemax is None:
wavemax = load_throughput('z').wavemax + 10.0
return np.arange(round(wavemin, 1), wavemax, dw)
def bgs_write_simdata(sim, overwrite=False):
"""Create a metadata table with simulation inputs.
Parameters
----------
    sim : argparse.Namespace
        Simulation parameters from the command line.
overwrite : bool
Overwrite simulation data file.
Returns
-------
simdata : Table
Data table written to disk.
"""
from desispec.io.util import makepath
from desispec.io.util import write_bintable
simdatafile = os.path.join(sim.simdir,
'bgs_{}_simdata.fits'.format(sim.simid))
makepath(simdatafile)
cols = [
('SEED', 'S20'),
('NSPEC', 'i4'),
('EXPTIME', 'f4'),
('AIRMASS', 'f4'),
('SEEING', 'f4'),
('MOONFRAC', 'f4'),
('MOONSEP', 'f4'),
('MOONALT', 'f4')]
simdata = Table(np.zeros(sim.nsim, dtype=cols))
simdata['EXPTIME'].unit = 's'
simdata['SEEING'].unit = 'arcsec'
simdata['MOONSEP'].unit = 'deg'
simdata['MOONALT'].unit = 'deg'
simdata['SEED'] = sim.seed
simdata['NSPEC'] = sim.nspec
simdata['AIRMASS'] = sim.airmass
simdata['SEEING'] = sim.seeing
simdata['MOONALT'] = sim.moonalt
simdata['MOONSEP'] = sim.moonsep
simdata['MOONFRAC'] = sim.moonfrac
simdata['EXPTIME'] = sim.exptime
if overwrite or not os.path.isfile(simdatafile):
print('Writing {}'.format(simdatafile))
write_bintable(simdatafile, simdata, extname='SIMDATA', clobber=True)
return simdata
def simdata2obsconditions(sim):
"""Pack simdata observation conditions into a dictionary.
Parameters
----------
    sim : argparse.Namespace
        Simulation parameters from the command line.
Returns
-------
obs : dict
Observation conditions dictionary.
"""
obs = dict(AIRMASS=sim.airmass,
EXPTIME=sim.exptime,
MOONALT=sim.moonalt,
MOONFRAC=sim.moonfrac,
MOONSEP=sim.moonsep,
SEEING=sim.seeing)
return obs
def write_templates(filename, flux, wave, target, truth, objtruth):
"""Write galaxy templates to a FITS file.
Parameters
----------
filename : str
Path to output file.
flux : ndarray
Array of flux data for template spectra.
wave : ndarray
Array of wavelengths.
target : Table
Target information.
truth : Table
Template simulation truth.
objtruth : Table
Object-specific truth data.
"""
import astropy.units as u
from astropy.io import fits
hx = fits.HDUList()
# Write the wavelength table.
hdu_wave = fits.PrimaryHDU(wave)
hdu_wave.header['EXTNAME'] = 'WAVE'
hdu_wave.header['BUNIT'] = 'Angstrom'
hdu_wave.header['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hx.append(hdu_wave)
# Write the flux table.
fluxunits = 1e-17 * u.erg / (u.s * u.cm**2 * u.Angstrom)
hdu_flux = fits.ImageHDU(flux)
hdu_flux.header['EXTNAME'] = 'FLUX'
hdu_flux.header['BUNIT'] = str(fluxunits)
hx.append(hdu_flux)
# Write targets table.
hdu_targets = fits.table_to_hdu(target)
hdu_targets.header['EXTNAME'] = 'TARGETS'
hx.append(hdu_targets)
# Write truth table.
hdu_truth = fits.table_to_hdu(truth)
hdu_truth.header['EXTNAME'] = 'TRUTH'
hx.append(hdu_truth)
# Write objtruth table.
hdu_objtruth = fits.table_to_hdu(objtruth)
hdu_objtruth.header['EXTNAME'] = 'OBJTRUTH'
hx.append(hdu_objtruth)
print('Writing {}'.format(filename))
hx.writeto(filename, overwrite=True)
def parse(options=None):
"""Parse command-line options.
"""
parser = ArgumentParser(description='Fast galaxy simulator')
parser.add_argument('--config', action=SetDefaultFromYAMLFile)
#
# Observational conditions.
#
cond = parser.add_argument_group('Observing conditions')
cond.add_argument('--airmass', dest='airmass', type=float, default=1.,
help='Airmass [1..40].')
cond.add_argument('--exptime', dest='exptime', type=int, default=300,
help='Exposure time [s].')
cond.add_argument('--seeing', dest='seeing', type=float, default=1.1,
help='Seeing [arcsec].')
cond.add_argument('--moonalt', dest='moonalt', type=float, default=-60.,
help='Moon altitude [deg].')
cond.add_argument('--moonfrac', dest='moonfrac', type=float, default=0.,
help='Illuminated moon fraction [0..1].')
cond.add_argument('--moonsep', dest='moonsep', type=float, default=180.,
help='Moon separation angle [deg].')
#
# Galaxy simulation settings.
#
mcset = parser.add_argument_group('Simulation settings')
mcset.add_argument('--nside', dest='nside', type=int, default=64,
help='HEALPix NSIDE parameter.')
mcset.add_argument('--nspec', dest='nspec', type=int, default=100,
help='Number of spectra per HEALPix pixel.')
mcset.add_argument('--nsim', dest='nsim', type=int, default=10,
help='Number of simulations (HEALPix pixels).')
mcset.add_argument('--seed', dest='seed', type=int, default=None,
help='Random number seed')
mcset.add_argument('--addsnia', dest='addsnia', action='store_true', default=False,
help='Add SNe Ia to host spectra.')
mcset.add_argument('--addsniip', dest='addsniip', action='store_true', default=False,
help='Add SNe IIp to host spectra.')
mcset.add_argument('--snrmin', dest='snrmin', type=float, default=0.01,
help='SN/host minimum flux ratio.')
mcset.add_argument('--snrmax', dest='snrmax', type=float, default=1.00,
help='SN/host maximum flux ratio.')
#
# Output settings.
#
output = parser.add_argument_group('Output settings')
output.add_argument('--simid', dest='simid',
default=datetime.now().strftime('%Y-%m-%d'),
help='ID/name for simulations.')
output.add_argument('--simdir', dest='simdir', default='',
help='Simulation output directory absolute path.')
# Parse command line options.
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
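# Illustrative invocation sketch (the script name and output path are
# hypothetical; all options shown are defined in the parser above):
#   python bgs_sim.py --nspec 50 --nsim 2 --seed 1 --exptime 300 --simdir /tmp/bgs
# or, programmatically:
#   args = parse(['--nspec', '50', '--nsim', '2', '--seed', '1'])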
def main(args=None):
log = get_logger()
if isinstance(args, (list, tuple, type(None))):
args = parse(args)
# Save simulation output.
rng = np.random.RandomState(args.seed)
simdata = bgs_write_simdata(args)
obs = simdata2obsconditions(args)
# Generate list of HEALPix pixels to randomly sample from the mocks.
healpixels = _get_healpixels_in_footprint(nside=args.nside)
npix = np.minimum(10*args.nsim, len(healpixels))
pixels = rng.choice(healpixels, size=npix, replace=False)
ipix = iter(pixels)
# Set up the template generator.
maker = BGSMaker(seed=args.seed)
    maker.template_maker = BGS(add_SNeIa=args.addsnia, add_SNeIIp=args.addsniip,
                               wave=_default_wave())
for j in range(args.nsim):
# Loop until finding a non-empty healpixel (one with mock galaxies).
tdata = []
while len(tdata) == 0:
pixel = next(ipix)
tdata = maker.read(healpixels=pixel, nside=args.nside)
# Add SN generation options.
if args.addsnia or args.addsniip:
tdata['SNE_FLUXRATIORANGE'] = (args.snrmin, args.snrmax)
tdata['SNE_FILTER'] = 'decam2014-r'
# Generate nspec spectral templates and write them to "truth" files.
wave = None
flux, targ, truth, obj = [], [], [], []
# Generate templates until we have enough to pass brightness cuts.
ntosim = np.min((args.nspec, len(tdata['RA'])))
ngood = 0
while ngood < args.nspec:
idx = rng.choice(len(tdata['RA']), ntosim)
tflux, twave, ttarg, ttruth, tobj = \
maker.make_spectra(tdata, indx=idx)
# Apply color cuts.
is_bright = isBGS_colors(gflux=ttruth['FLUX_G'],
rflux=ttruth['FLUX_R'],
zflux=ttruth['FLUX_Z'],
w1flux=ttruth['FLUX_W1'],
w2flux=ttruth['FLUX_W2'],
targtype='bright')
is_faint = isBGS_colors(gflux=ttruth['FLUX_G'],
rflux=ttruth['FLUX_R'],
zflux=ttruth['FLUX_Z'],
w1flux=ttruth['FLUX_W1'],
w2flux=ttruth['FLUX_W2'],
targtype='faint')
is_wise = isBGS_colors(gflux=ttruth['FLUX_G'],
rflux=ttruth['FLUX_R'],
zflux=ttruth['FLUX_Z'],
w1flux=ttruth['FLUX_W1'],
w2flux=ttruth['FLUX_W2'],
targtype='wise')
keep = np.logical_or(np.logical_or(is_bright, is_faint), is_wise)
_ngood = np.count_nonzero(keep)
if _ngood > 0:
ngood += _ngood
flux.append(tflux[keep, :])
targ.append(ttarg[keep])
truth.append(ttruth[keep])
obj.append(tobj[keep])
wave = maker.wave
flux = np.vstack(flux)[:args.nspec, :]
targ = vstack(targ)[:args.nspec]
truth = vstack(truth)[:args.nspec]
obj = vstack(obj)[:args.nspec]
if args.addsnia or args.addsniip:
# TARGETID in truth table is split in two; deal with it here.
truth['TARGETID'] = truth['TARGETID_1']
# Set up and verify the TARGETID across all truth tables.
n = len(truth)
new_id = 10000000*pixel + 100000*j + np.arange(1, n+1)
truth['TARGETID'][:] = new_id
targ['TARGETID'][:] = new_id
obj['TARGETID'][:] = new_id
assert(len(truth) == args.nspec)
assert(np.all(targ['TARGETID'] == truth['TARGETID']))
assert(len(truth) == len(np.unique(truth['TARGETID'])))
assert(len(targ) == len(np.unique(targ['TARGETID'])))
assert(len(obj) == len(np.unique(obj['TARGETID'])))
truthfile = os.path.join(args.simdir,
'bgs_{}_{:03}_truth.fits'.format(args.simid, j))
write_templates(truthfile, flux, wave, targ, truth, obj)
# Generate simulated spectra, given observing conditions.
specfile = os.path.join(args.simdir,
'bgs_{}_{:03}_spectra.fits'.format(args.simid, j))
sim_spectra(wave, flux, 'bgs', specfile, obsconditions=obs,
sourcetype='bgs', targetid=truth['TARGETID'],
redshift=truth['TRUEZ'], seed=args.seed, expid=j)
| bsd-3-clause |
jrbitt/gamesresearch | tools/experimen1.py | 1 | 4311 |
import numpy as np
from scipy import stats
import pandas as pd
def loadSaveBase(loadfile,savefile):
	# Read the whole official base
#base = np.genfromtxt(loadfile,usecols=[0,1,2,3,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42],delimiter=",",skip_header=1, names= ['goid','code','year','platform','Strategy','Tactics','Action','Compilation','Adventure','Sports','Educational','Racing','Driving','Puzzle','Role-Playing(RPG)','DLC','Add-on','Simulation','SpecialEdition','Artgame','brightness','saturation','tamura_contrast','arousal','pleasure','dominance','hue_m','sat_m','val_m','black','blue','brown','green','gray','orange','pink','purple','red','white','yellow','entropy_r','entropy_g','entropy_b'],dtype= ['S100','S100','u4','S50','S10','S10','S10','S10','S10','S10','S10','S10','S10','S10','S10','S10','S10','S10','S10','S10','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4','f4'])
base = pd.read_csv(loadfile,usecols=[0,1,2,3,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42],delimiter="\t")
#np.savetxt('exemplo.csv',base,delimiter='\t', fmt="%s %i %s %.5f %.5f")
#np.savetxt('exemplo2.csv',base,delimiter='\t', fmt="%s %i %s %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f")
#np.savetxt(savefile,base,delimiter='\t', fmt="%s %s %i %s %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f")
base.to_csv(savefile,sep=';', index=False)
# Add the filename column
def addFilename(filename):
arq = open(filename,'r')
linhas = []
prim = True
for l in arq:
if prim:
prim = False
else:
tks = l.split(';')
tks[len(tks)-1] = tks[len(tks)-1][:-1]
tks.append(tks[1]+".jpg")
linhas.append(tks)
arq.close()
return linhas
def addShapes(shapefile):
arq2 = open(shapefile,'r')
mapa = {}
for l in arq2:
tks = l.split('\t')
tks[len(tks)-1] = tks[len(tks)-1][:-1]
mapa[tks[0]] = tks[1:]
arq2.close()
return mapa
def createBase(linhas, mapa,filename):
arq = open(filename,'w')
arq.write('goid\tcode\tyear\tplatform\tbrightness\tsaturation\ttamura_contrast\tarousal\tpleasure\tdominance\thue_m\tsat_m\tval_m\tblack\tblue\tbrown\tgreen\tgray\torange\tpink\tpurple\tred\twhite\tyellow\tentropy_r\tentropy_g\tentropy_b\tfilename\tcount\ttotal_area\tavg_size\tperc_area\tperimeter\tcirc\tsolidity\n')
cont = 0
for l in linhas:
if l[2] != '1900' and l[2] != '4444':
#if int(l[2]) >= 2010 and int(l[2]) < 2020:
			# original lines with the filename
for t in l:
arq.write(t+'\t')
			# new columns
tk = mapa[l[len(l)-1]]
for k in tk:
arq.write(k+'\t')
arq.write('\n')
cont += 1
arq.close()
print cont
a = []
b = []
def pearson(ia,ib):
arq = open('exemplo4_10.csv','r')
a = []
b = []
cont = 0
for l in arq:
if cont == 0:
cont = 1
else:
tks = l.split('\t')
tks[len(tks)-1] = tks[len(tks)-1][:-1]
a.append(float(tks[ia]))
b.append(float(tks[ib]))
arq.close()
return a, b
def calculatePearson():
for ai in range(4,36):
for bi in range(4,36):
if ai==27 or bi==27:
continue
else:
a,b = pearson(ai,bi)
res = stats.pearsonr(a,b)
if res[0]>0.3 and ai != bi:
print str(ai)+" "+str(bi)
print res
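# Main pipeline executed below (descriptive sketch of the module-level calls):
#   1. loadSaveBase  - read the selected columns of the official base and save them.
#   2. addFilename   - re-read that file and append a '<code>.jpg' filename column.
#   3. addShapes     - load per-image shape metrics keyed by that filename.
#   4. createBase    - merge both sources (skipping years 1900/4444) and write the result.
# calculatePearson (commented out below) reports column pairs with correlation > 0.3.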
base = None
linhas = None
mapa = None
loadSaveBase("base_oficial_final_v2.csv",'nova1_v2.csv')
linhas = addFilename('nova1_v2.csv')
mapa = addShapes('shapes40mil.csv')
createBase(linhas,mapa,'nova3_v2.csv')
#calculatePearson()
'''
arq = open('exemplo4.csv','r')
arq2 = open('Clusters_weka.txt','r')
arq3 = open('exemplo4_clusters.csv','w')
arq.readline()
for l in arq:
linha = arq2.readline()
t = linha.split(',')
nl = ""
nl += l+'\t'+t[len(t)-1][:-1]+'\n'
arq3.write(nl)
arq.close()
arq2.close()
arq3.close()
''' | apache-2.0 |
aitoralmeida/dl_activity_recognition | lstm/stateful-lstm.py | 1 | 11443 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 25 16:16:52 2016
@author: gazkune
"""
from collections import Counter
import json
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
import numpy as np
# Directory of datasets
DIR = '../sensor2vec/kasteren_dataset/'
# Dataset with vectors but without the action timestamps
DATASET_CSV = DIR + 'base_kasteren_reduced.csv'
# List of unique activities in the dataset
UNIQUE_ACTIVITIES = DIR + 'unique_activities.json'
# List of unique actions in the dataset
UNIQUE_ACTIONS = DIR + 'unique_actions.json'
# Action vectors
ACTION_VECTORS = DIR + 'actions_vectors.json'
# Maximum number of actions in an activity
ACTIVITY_MAX_LENGHT = 32
# Number of dimensions of an action vector
ACTION_MAX_LENGHT = 50
def save_model(model):
json_string = model.to_json()
model_name = 'model_activity_lstm'
open(model_name + '.json', 'w').write(json_string)
model.save_weights(model_name + '.h5', overwrite=True)
def load_model(model_file, weights_file):
    model = model_from_json(open(model_file).read())
    model.load_weights(weights_file)
    return model
def check_activity_distribution(y_np, unique_activities):
activities = []
for activity_np in y_np:
index = activity_np.tolist().index(1.0)
activities.append(unique_activities[index])
print Counter(activities)
"""
Function to plot accurary and loss during training
"""
def plot_training_info(metrics, save, history):
# summarize history for accuracy
if 'accuracy' in metrics:
plt.plot(history['acc'])
plt.plot(history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
if save == True:
plt.savefig('accuracy.png')
plt.gcf().clear()
else:
plt.show()
# summarize history for loss
if 'loss' in metrics:
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
#plt.ylim(1e-3, 1e-2)
plt.yscale("log")
plt.legend(['train', 'test'], loc='upper left')
if save == True:
plt.savefig('loss.png')
plt.gcf().clear()
else:
plt.show()
"""
Function to prepare the dataset with individual sequences (simple framing)
"""
def prepare_indiv_sequences(df, action_vectors, unique_activities, activity_to_int):
print 'Preparing training set...'
X = []
y = []
for index in df.index:
action = df.loc[index, 'action']
X.append(np.array(action_vectors[action]))
y.append(activity_to_int[df.loc[index, 'activity']])
y = np_utils.to_categorical(y)
return X, y
def prepare_variable_sequences(df, action_vectors, unique_activities, activity_to_int):
# New data framing
print 'Preparing training set...'
X = []
y = []
current_activity = ""
actions = []
aux_actions = []
for index in df.index:
if current_activity == "":
current_activity = df.loc[index, 'activity']
if current_activity != df.loc[index, 'activity']:
y.append(activity_to_int[current_activity])
X.append(actions)
#print current_activity, aux_actions
current_activity = df.loc[index, 'activity']
# reset auxiliary variables
actions = []
aux_actions = []
action = df.loc[index, 'action']
#print 'Current action: ', action
actions.append(np.array(action_vectors[action]))
aux_actions.append(action)
# Append the last activity
y.append(activity_to_int[current_activity])
X.append(actions)
# Use sequence padding for training samples
X = pad_sequences(X, maxlen=ACTIVITY_MAX_LENGHT, dtype='float32')
    # Transform class labels to one-hot encoding
y = np_utils.to_categorical(y)
return X, y
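# Shape sketch (assuming the constants defined above): after padding, X is a
# float32 array of shape (num_activities, ACTIVITY_MAX_LENGHT, ACTION_MAX_LENGHT),
# i.e. one padded sequence of 50-dimensional action vectors per activity
# instance, and y is the matching one-hot activity matrix.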
def main(argv):
# Load dataset from csv file
df_dataset = pd.read_csv(DATASET_CSV, parse_dates=[[0, 1]], header=None, index_col=0, sep=' ')
df_dataset.columns = ['sensor', 'action', 'event', 'activity']
df_dataset.index.names = ["timestamp"]
unique_activities = json.load(open(UNIQUE_ACTIVITIES, 'r'))
total_activities = len(unique_activities)
action_vectors = json.load(open(ACTION_VECTORS, 'r'))
print 'Preparing training set...'
# Generate the dict to transform activities to integer numbers
activity_to_int = dict((c, i) for i, c in enumerate(unique_activities))
# Generate the dict to transform integer numbers to activities
int_to_activity = dict((i, c) for i, c in enumerate(unique_activities))
# Test the simple problem framing
#X, y = prepare_indiv_sequences(df_dataset, action_vectors, unique_activities, activity_to_int)
#variable = False
    # Test the variable sequence problem framing approach
    # Remember to change batch_input_shape accordingly
X, y = prepare_variable_sequences(df_dataset, action_vectors, unique_activities, activity_to_int)
variable = True
total_examples = len(X)
test_per = 0.2
limit = int(test_per * total_examples)
X_train = X[limit:]
X_test = X[:limit]
y_train = y[limit:]
y_test = y[:limit]
print 'Total examples:', total_examples
print 'Train examples:', len(X_train), len(y_train)
print 'Test examples:', len(X_test), len(y_test)
sys.stdout.flush()
X = np.array(X_train)
y = np.array(y_train)
print 'Activity distribution for training:'
check_activity_distribution(y, unique_activities)
X_test = np.array(X_test)
y_test = np.array(y_test)
print 'Activity distribution for testing:'
check_activity_distribution(y_test, unique_activities)
# Current shape of X and y
print 'X:', X.shape
print 'y:', y.shape
# reshape X and X_test to be [samples, time steps, features]
# In this test we will set timesteps to 1 even though we have padded sequences
time_steps = 1
#X = X.reshape(X.shape[0], time_steps, ACTION_MAX_LENGHT)
#X_test = X_test.reshape(X_test.shape[0], time_steps, ACTION_MAX_LENGHT)
print 'Shape (X,y):'
print X.shape
print y.shape
print 'Training set prepared'
sys.stdout.flush()
# Build the model
print 'Building model...'
sys.stdout.flush()
batch_size = 1
model = Sequential()
# Test with Stateful layers
    # I read that batch_input_shape=(batch_size, None, features) can be used for variable length sequences
model.add(LSTM(512, return_sequences=False, stateful=True, dropout_W=0.2, dropout_U=0.2, batch_input_shape=(batch_size, time_steps, X.shape[2])))
#model.add(LSTM(512, return_sequences=False, stateful=True, batch_input_shape=(batch_size, max_sequence_length, ACTION_MAX_LENGHT)))
#model.add(Dropout(0.8))
#model.add(LSTM(512, return_sequences=False, dropout_W=0.2, dropout_U=0.2))
#model.add(Dropout(0.8))
model.add(Dense(total_activities, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', 'mse', 'mae'])
print 'Model built'
print(model.summary())
sys.stdout.flush()
print 'Training...'
sys.stdout.flush()
# Test manual training
# we need a manual history dict with 'acc', 'val_acc', 'loss' and 'val_loss' keys
manual_training = True
history = {}
history['acc'] = []
history['val_acc'] = []
history['loss'] = []
history['val_loss'] = []
"""
for i in range(10):
print 'epoch: ', i
model.fit(X, y, nb_epoch=1, batch_size=batch_size, shuffle=False)
hist = model.fit(X, y, nb_epoch=1, batch_size=batch_size, shuffle=False, validation_data=(X_test, y_test))
history['acc'].append(hist.history['acc'])
history['val_acc'].append(hist.history['val_acc'])
history['loss'].append(hist.history['loss'])
history['val_loss'].append(hist.history['val_loss'])
model.reset_states()
print 'Saving model...'
sys.stdout.flush()
save_model(model)
print 'Model saved'
"""
# Check data format visually
print 'X train shape:', X.shape
print X
sample = np.expand_dims(np.expand_dims(X[0][0], axis=0), axis=0)
print 'sample shape:', sample.shape
print sample
other = X[0][0]
print 'other shape:', other.shape
print other
# This training process is to test variable length sequences representing an activity
# for stateful LSTMs. We train and test batch per batch
max_len = X.shape[1]
print 'max length:', max_len
epochs = 100
for epoch in range(epochs):
print '***************'
print 'Epoch', epoch, '/', epochs
mean_tr_acc = []
mean_tr_loss = []
for i in range(len(X)):
y_true = y[i]
#print 'y_true:', np.array([y_true]), np.array([y_true]).shape
for j in range(max_len):
x = np.expand_dims(np.expand_dims(X[i][j], axis=0), axis=0)
#tr_loss, tr_acc = model.train_on_batch(x, np.array([y_true]))
hist = model.fit(x, np.array([y_true]), nb_epoch=1, batch_size=1, shuffle=False, verbose=0)
mean_tr_acc.append(hist.history["acc"])
mean_tr_loss.append(hist.history["loss"])
model.reset_states()
print('accuracy training = {}'.format(np.mean(mean_tr_acc)))
print('loss training = {}'.format(np.mean(mean_tr_loss)))
print('___________________________________')
"""
# Comment for now the testing step
mean_te_acc = []
mean_te_loss = []
for i in range(len(X_test)):
for j in range(max_len):
te_loss, te_acc = model.test_on_batch(np.expand_dims(np.expand_dims(X_test[i][j], axis=0), axis=0), y_test[i])
mean_te_acc.append(te_acc)
mean_te_loss.append(te_loss)
model.reset_states()
for j in range(max_len):
y_pred = model.predict_on_batch(np.expand_dims(np.expand_dims(X_test[i][j], axis=0), axis=0))
model.reset_states()
print('accuracy testing = {}'.format(np.mean(mean_te_acc)))
print('loss testing = {}'.format(np.mean(mean_te_loss)))
print('___________________________________')
"""
# summarize performance of the model testing the evaluate function
#scores = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
#print("Model Accuracy: %.2f%%" % (scores[1]*100))
"""
if manual_training == True:
plot_training_info(['accuracy', 'loss'], True, history)
else:
plot_training_info(['accuracy', 'loss'], True, history.history)
"""
if __name__ == "__main__":
main(sys.argv)
| gpl-3.0 |
jameshensman/pymc3 | pymc3/tests/test_plots.py | 13 | 1721 | import matplotlib
matplotlib.use('Agg', warn=False)
import numpy as np
from .checks import close_to
import pymc3.plots
from pymc3.plots import *
from pymc3 import Slice, Metropolis, find_hessian, sample
def test_plots():
# Test single trace
from pymc3.examples import arbitrary_stochastic as asmod
with asmod.model as model:
start = model.test_point
h = find_hessian(start)
step = Metropolis(model.vars, h)
trace = sample(3000, step, start)
traceplot(trace)
forestplot(trace)
autocorrplot(trace)
def test_plots_multidimensional():
# Test single trace
from .models import multidimensional_model
start, model, _ = multidimensional_model()
with model as model:
h = np.diag(find_hessian(start))
step = Metropolis(model.vars, h)
trace = sample(3000, step, start)
traceplot(trace)
#forestplot(trace)
#autocorrplot(trace)
def test_multichain_plots():
from pymc3.examples import disaster_model as dm
with dm.model as model:
# Run sampler
step1 = Slice([dm.early_mean, dm.late_mean])
step2 = Metropolis([dm.switchpoint])
start = {'early_mean': 2., 'late_mean': 3., 'switchpoint': 50}
ptrace = sample(1000, [step1, step2], start, njobs=2)
forestplot(ptrace, vars=['early_mean', 'late_mean'])
autocorrplot(ptrace, vars=['switchpoint'])
def test_make_2d():
a = np.arange(4)
close_to(pymc3.plots.make_2d(a), a[:,None], 0)
n = 7
a = np.arange(n*4*5).reshape((n,4,5))
res = pymc3.plots.make_2d(a)
assert res.shape == (n,20)
close_to(a[:,0,0], res[:,0], 0)
close_to(a[:,3,2], res[:,2*4+3], 0)
| apache-2.0 |
gwaygenomics/pancancer | scripts/copy_burden_merge.py | 1 | 1259 | """
Gregory Way 2017
PanCancer Classifier
scripts/copy_burden_merge.py
Merge per sample classifier scores with segment based scores
Usage: Run in command line with required command argument:
python scripts/copy_burden_merge.py --classifier_folder
classifier_folder is a string pointing to the location of the classifier data
Output:
.tsv file of classifier scores merged with segment based copy number scores
"""
import os
import argparse
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--classifier_folder',
help='string of the location of classifier data')
args = parser.parse_args()
# Load command arguments
pred_file = os.path.join(args.classifier_folder, 'classifier_decisions.tsv')
burden_file = os.path.join('data', 'seg_based_scores.tsv')
out_file = os.path.join(os.path.dirname(pred_file), 'tables',
                        'copy_burden_predictions.tsv')
# Load and process data
copy_burden_df = pd.read_table(burden_file)
classifier_df = pd.read_table(pred_file, index_col=0)
combined_df = classifier_df.merge(copy_burden_df, left_index=True,
right_on='Sample')
combined_df.index = combined_df['Sample']
combined_df.to_csv(out_file, sep='\t')
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/sandbox/nonparametric/tests/ex_gam_new.py | 34 | 3845 | # -*- coding: utf-8 -*-
"""Example for GAM with Poisson Model and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, zip
import time
import numpy as np
#import matplotlib.pyplot as plt
np.seterr(all='raise')
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction or end in overflow
#DGP: simple polynomial
order = 3
sigma_noise = 0.1
nobs = 1000
#lb, ub = -0.75, 3#1.5#0.75 #2.5
lb, ub = -3.5, 3
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*1, 1.*x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) #/ 4.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
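# In other words, the DGP is
#   y = sum_{k=0..order} (x1/max(x1))**k + sum_{k=0..order} sin(2*x1)**k + noise,
# which the additive model should recover as two smooth components.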
example = 3
if example == 2:
print("binomial")
f = family.Binomial()
mu_true = f.link.inverse(z)
#b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b = np.asarray([stats.bernoulli.rvs(p) for p in f.link.inverse(z)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
#for plotting
yp = f.link.inverse(y)
p = b
if example == 3:
print("Poisson")
f = family.Poisson()
#y = y/y.max() * 3
yp = f.link.inverse(z)
#p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p = np.asarray([stats.poisson.rvs(p) for p in f.link.inverse(z)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
for ss in m.smoothers:
print(ss.params)
if example > 1:
import matplotlib.pyplot as plt
plt.figure()
for i in np.array(m.history[2:15:3]): plt.plot(i.T)
plt.figure()
plt.plot(exog)
#plt.plot(p, '.', lw=2)
plt.plot(y_true, lw=2)
y_pred = m.results.mu # + m.results.alpha #m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(p, '.')
plt.plot(yp, 'b-', label='true')
plt.plot(y_pred, 'r-', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM Poisson')
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], p[sortidx], 'k.', alpha=0.5)
plt.plot(xx[sortidx], yp[sortidx], 'b.', label='true')
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM Poisson ' + ii)
counter += 1
res = GLM(p, exog_reduced, family=f).fit()
#plot component, compared to true component
x1 = x[:,0]
x2 = x[:,1]
f1 = exog[:,:order+1].sum(1) - 1 #take out constant
f2 = exog[:,order+1:].sum(1) - 1
plt.figure()
#Note: need to correct for constant which is indeterminatedly distributed
#plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0]+1, 'r')
#better would be subtract f(0) m.smoothers[0](np.array([0]))
plt.plot(x1, f1, linewidth=2)
plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0], 'r')
plt.figure()
plt.plot(x2, f2, linewidth=2)
plt.plot(x2, m.smoothers[1](x2)-m.smoothers[1].params[0], 'r')
plt.show() | bsd-3-clause |
victorbergelin/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
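# (The boundary satisfies w[0]*x + w[1]*y + intercept = 0, so solving for y
#  gives the slope a = -w[0]/w[1] and offset -intercept/w[1] used above.)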
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
louispotok/pandas | pandas/tests/series/test_rank.py | 3 | 18837 | # -*- coding: utf-8 -*-
from pandas import compat, Timestamp
import pytest
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
from pandas import Series, date_range, NaT
from pandas.api.types import CategoricalDtype
from pandas.compat import product
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
from pandas._libs.tslib import iNaT
from pandas._libs.algos import Infinity, NegInfinity
from itertools import chain
import pandas.util._test_decorators as td
class TestSeriesRank(TestData):
s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
results = {
'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
3.5, 1.5, 8.0, nan, 5.5]),
'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
}
def test_rank(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
# rankdata returns a ndarray
exp = Series(rankdata(filled), index=filled.index, name='ts')
exp[mask] = np.nan
tm.assert_series_equal(ranks, exp)
iseries = Series(np.arange(5).repeat(2))
iranks = iseries.rank()
exp = iseries.astype(float).rank()
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
exp = iseries / 5.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(1, 100))
exp = Series(np.repeat(0.505, 100))
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries[1] = np.nan
exp = Series(np.repeat(50.0 / 99.0, 100))
exp[1] = np.nan
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
exp = iseries.copy()
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
rng = date_range('1/1/1990', periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.iloc[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
exp = Series([2, 1, 3, 5, 4, 6.0])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
# GH 5968
iseries = Series(['3 day', '1 day 10m', '-2 day', NaT],
dtype='m8[ns]')
exp = Series([3, 2, 1, np.nan])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
values = np.array(
[-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40
], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_rank_categorical(self):
# GH issue #15420 rank incorrectly orders ordered categories
# Test ascending/descending ranking for ordered categoricals
exp = Series([1., 2., 3., 4., 5., 6.])
exp_desc = Series([6., 5., 4., 3., 2., 1.])
ordered = Series(
['first', 'second', 'third', 'fourth', 'fifth', 'sixth']
).astype(CategoricalDtype(categories=['first', 'second', 'third',
'fourth', 'fifth', 'sixth'],
ordered=True))
assert_series_equal(ordered.rank(), exp)
assert_series_equal(ordered.rank(ascending=False), exp_desc)
# Unordered categoricals should be ranked as objects
unordered = Series(['first', 'second', 'third', 'fourth',
'fifth', 'sixth']).astype(
CategoricalDtype(categories=['first', 'second', 'third',
'fourth', 'fifth', 'sixth'],
ordered=False))
exp_unordered = Series([2., 4., 6., 3., 1., 5.])
res = unordered.rank()
assert_series_equal(res, exp_unordered)
unordered1 = Series(
[1, 2, 3, 4, 5, 6],
).astype(CategoricalDtype([1, 2, 3, 4, 5, 6], False))
exp_unordered1 = Series([1., 2., 3., 4., 5., 6.])
res1 = unordered1.rank()
assert_series_equal(res1, exp_unordered1)
# Test na_option for rank data
na_ser = Series(
['first', 'second', 'third', 'fourth', 'fifth', 'sixth', np.NaN]
).astype(CategoricalDtype(['first', 'second', 'third', 'fourth',
'fifth', 'sixth', 'seventh'], True))
exp_top = Series([2., 3., 4., 5., 6., 7., 1.])
exp_bot = Series([1., 2., 3., 4., 5., 6., 7.])
exp_keep = Series([1., 2., 3., 4., 5., 6., np.NaN])
assert_series_equal(na_ser.rank(na_option='top'), exp_top)
assert_series_equal(na_ser.rank(na_option='bottom'), exp_bot)
assert_series_equal(na_ser.rank(na_option='keep'), exp_keep)
# Test na_option for rank data with ascending False
exp_top = Series([7., 6., 5., 4., 3., 2., 1.])
exp_bot = Series([6., 5., 4., 3., 2., 1., 7.])
exp_keep = Series([6., 5., 4., 3., 2., 1., np.NaN])
assert_series_equal(
na_ser.rank(na_option='top', ascending=False),
exp_top
)
assert_series_equal(
na_ser.rank(na_option='bottom', ascending=False),
exp_bot
)
assert_series_equal(
na_ser.rank(na_option='keep', ascending=False),
exp_keep
)
# Test with pct=True
na_ser = Series(['first', 'second', 'third', 'fourth', np.NaN]).astype(
CategoricalDtype(['first', 'second', 'third', 'fourth'], True))
exp_top = Series([0.4, 0.6, 0.8, 1., 0.2])
exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.])
exp_keep = Series([0.25, 0.5, 0.75, 1., np.NaN])
assert_series_equal(na_ser.rank(na_option='top', pct=True), exp_top)
assert_series_equal(na_ser.rank(na_option='bottom', pct=True), exp_bot)
assert_series_equal(na_ser.rank(na_option='keep', pct=True), exp_keep)
def test_rank_signature(self):
s = Series([0, 1])
s.rank(method='average')
pytest.raises(ValueError, s.rank, 'average')
@pytest.mark.parametrize('contents,dtype', [
([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10,
2, 40, np.inf],
'float64'),
([-np.inf, -50, -1, -1e-20, -1e-25, -1e-45, 0, 1e-40, 1e-20, 1e-10,
2, 40, np.inf],
'float32'),
([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max],
'uint8'),
pytest.param([np.iinfo(np.int64).min, -100, 0, 1, 9999, 100000,
1e10, np.iinfo(np.int64).max],
'int64',
marks=pytest.mark.xfail(
reason="iNaT is equivalent to minimum value of dtype"
"int64 pending issue #16674")),
([NegInfinity(), '1', 'A', 'BA', 'Ba', 'C', Infinity()],
'object')
])
def test_rank_inf(self, contents, dtype):
dtype_na_map = {
'float64': np.nan,
'float32': np.nan,
'int64': iNaT,
'object': None
}
# Insert nans at random positions if underlying dtype has missing
# value. Then adjust the expected order by adding nans accordingly
# This is for testing whether rank calculation is affected
        # when values are intertwined with nan values.
values = np.array(contents, dtype=dtype)
exp_order = np.array(range(len(values)), dtype='float64') + 1.0
if dtype in dtype_na_map:
na_value = dtype_na_map[dtype]
nan_indices = np.random.choice(range(len(values)), 5)
values = np.insert(values, nan_indices, na_value)
exp_order = np.insert(exp_order, nan_indices, np.nan)
# shuffle the testing array and expected results in the same way
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(exp_order[random_order], dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_rank_tie_methods(self):
s = self.s
def _check(s, expected, method='average'):
result = s.rank(method=method)
tm.assert_series_equal(result, Series(expected))
dtypes = [None, object]
disabled = set([(object, 'first')])
results = self.results
for method, dtype in product(results, dtypes):
if (dtype, method) in disabled:
continue
series = s if dtype is None else s.astype(dtype)
_check(series, results[method], method=method)
@td.skip_if_no_scipy
@pytest.mark.parametrize('ascending', [True, False])
@pytest.mark.parametrize('method', ['average', 'min', 'max', 'first',
'dense'])
@pytest.mark.parametrize('na_option', ['top', 'bottom', 'keep'])
def test_rank_tie_methods_on_infs_nans(self, method, na_option, ascending):
dtypes = [('object', None, Infinity(), NegInfinity()),
('float64', np.nan, np.inf, -np.inf)]
chunk = 3
disabled = set([('object', 'first')])
def _check(s, method, na_option, ascending):
exp_ranks = {
'average': ([2, 2, 2], [5, 5, 5], [8, 8, 8]),
'min': ([1, 1, 1], [4, 4, 4], [7, 7, 7]),
'max': ([3, 3, 3], [6, 6, 6], [9, 9, 9]),
'first': ([1, 2, 3], [4, 5, 6], [7, 8, 9]),
'dense': ([1, 1, 1], [2, 2, 2], [3, 3, 3])
}
ranks = exp_ranks[method]
if na_option == 'top':
order = [ranks[1], ranks[0], ranks[2]]
elif na_option == 'bottom':
order = [ranks[0], ranks[2], ranks[1]]
else:
order = [ranks[0], [np.nan] * chunk, ranks[1]]
expected = order if ascending else order[::-1]
expected = list(chain.from_iterable(expected))
result = s.rank(method=method, na_option=na_option,
ascending=ascending)
tm.assert_series_equal(result, Series(expected, dtype='float64'))
for dtype, na_value, pos_inf, neg_inf in dtypes:
in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk
iseries = Series(in_arr, dtype=dtype)
if (dtype, method) in disabled:
continue
_check(iseries, method, na_option, ascending)
def test_rank_desc_mix_nans_infs(self):
# GH 19538
# check descending ranking when mix nans and infs
iseries = Series([1, np.nan, np.inf, -np.inf, 25])
result = iseries.rank(ascending=False)
exp = Series([3, np.nan, 1, 4, 2], dtype='float64')
tm.assert_series_equal(result, exp)
def test_rank_methods_series(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
import scipy
xs = np.random.randn(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
np.random.shuffle(xs)
index = [chr(ord('a') + i) for i in range(len(xs))]
for vals in [xs, xs + 1e6, xs * 1e-6]:
ts = Series(vals, index=index)
for m in ['average', 'min', 'max', 'first', 'dense']:
result = ts.rank(method=m)
sprank = rankdata(vals, m if m != 'first' else 'ordinal')
expected = Series(sprank, index=index)
if LooseVersion(scipy.__version__) >= LooseVersion('0.17.0'):
expected = expected.astype('float64')
tm.assert_series_equal(result, expected)
def test_rank_dense_method(self):
dtypes = ['O', 'f8', 'i8']
in_out = [([1], [1]),
([2], [1]),
([0], [1]),
([2, 2], [1, 1]),
([1, 2, 3], [1, 2, 3]),
([4, 2, 1], [3, 2, 1],),
([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),
([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5])]
for ser, exp in in_out:
for dtype in dtypes:
s = Series(ser).astype(dtype)
result = s.rank(method='dense')
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
def test_rank_descending(self):
dtypes = ['O', 'f8', 'i8']
for dtype, method in product(dtypes, self.results):
if 'i' in dtype:
s = self.s.dropna()
else:
s = self.s.astype(dtype)
res = s.rank(ascending=False)
expected = (s.max() - s).rank()
assert_series_equal(res, expected)
if method == 'first' and dtype == 'O':
continue
expected = (s.max() - s).rank(method=method)
res2 = s.rank(method=method, ascending=False)
assert_series_equal(res2, expected)
def test_rank_int(self):
s = self.s.dropna().astype('i8')
for method, res in compat.iteritems(self.results):
result = s.rank(method=method)
expected = Series(res).dropna()
expected.index = result.index
assert_series_equal(result, expected)
def test_rank_object_bug(self):
# GH 13445
# smoke tests
Series([np.nan] * 32).astype(object).rank(ascending=True)
Series([np.nan] * 32).astype(object).rank(ascending=False)
def test_rank_modify_inplace(self):
# GH 18521
# Check rank does not mutate series
s = Series([Timestamp('2017-01-05 10:20:27.569000'), NaT])
expected = s.copy()
s.rank()
result = s
assert_series_equal(result, expected)
# GH15630, pct should be on 100% basis when method='dense'
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1., 1.]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 2, 2. / 2, 2. / 2]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [1. / 3, 1. / 3, 3. / 3, 3. / 3, 2. / 3]),
([1, 1, 3, 3, 5, 5], [1. / 3, 1. / 3, 2. / 3, 2. / 3, 3. / 3, 3. / 3]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_dense_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='dense', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1. / 2, 1. / 2]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 3, 2. / 3, 2. / 3]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [1. / 5, 1. / 5, 4. / 5, 4. / 5, 3. / 5]),
([1, 1, 3, 3, 5, 5], [1. / 6, 1. / 6, 3. / 6, 3. / 6, 5. / 6, 5. / 6]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_min_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='min', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1., 1.]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 3, 3. / 3, 3. / 3]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [2. / 5, 2. / 5, 5. / 5, 5. / 5, 3. / 5]),
([1, 1, 3, 3, 5, 5], [2. / 6, 2. / 6, 4. / 6, 4. / 6, 6. / 6, 6. / 6]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_max_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='max', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1.5 / 2, 1.5 / 2]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 3, 2.5 / 3, 2.5 / 3]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [1.5 / 5, 1.5 / 5, 4.5 / 5, 4.5 / 5, 3. / 5]),
([1, 1, 3, 3, 5, 5],
[1.5 / 6, 1.5 / 6, 3.5 / 6, 3.5 / 6, 5.5 / 6, 5.5 / 6]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_average_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='average', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1. / 2, 2. / 2.]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 3, 2. / 3, 3. / 3]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [1. / 5, 2. / 5, 4. / 5, 5. / 5, 3. / 5]),
([1, 1, 3, 3, 5, 5], [1. / 6, 2. / 6, 3. / 6, 4. / 6, 5. / 6, 6. / 6]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_first_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='first', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
| bsd-3-clause |
peterfpeterson/mantid | scripts/HFIR_4Circle_Reduction/mplgraphicsview3d.py | 3 | 10025 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=R0901,R0902,R0904
import numpy as np
import os
from qtpy.QtWidgets import QSizePolicy
from mantidqt.MPLwidgets import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
class MplPlot3dCanvas(FigureCanvas):
"""
Matplotlib 3D canvas class
"""
def __init__(self, parent=None):
"""
Initialization
:return:
"""
#
self._myParentWindow = parent
# Initialize the figure
self._myFigure = Figure()
# Init canvas
FigureCanvas.__init__(self, self._myFigure)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
# Axes
self._myAxes = Axes3D(self._myFigure) # Canvas figure must be created for mouse rotation
self.format_coord_org = self._myAxes.format_coord
self._myAxes.format_coord = self.report_pixel
# color
self._colorMap = [0.5, 0.5, 0.5]
# Others
self._dataKey = 0
self._dataDict = dict()
# List of plots on canvas NOW
self._currPlotList = list()
        self._currSurfaceList = list()  # [{"xx": ..., "yy": ..., "val": ...}]
return
def clear_3d_plots(self):
"""
Clear all the figures from canvas
:return:
"""
for plt in self._currPlotList:
# del plt
self._myAxes.collections.remove(plt)
self._currPlotList = []
return
def get_data(self, data_key):
""" Get data by data key
:param data_key:
:return:
"""
assert data_key in self._dataDict, 'Data key %s does not exist in %s.' % (str(data_key),
str(self._dataDict.keys()))
return self._dataDict[data_key]
def import_3d_data(self, points, intensities):
"""
:param points:
:param intensities:
:return:
"""
# check
assert isinstance(points, np.ndarray) and points.shape[1] == 3, 'Shape is %s.' % str(points.shape)
assert isinstance(intensities, np.ndarray) and len(points) == len(intensities)
# set
self._dataDict[self._dataKey] = (points, intensities)
# update
r_value = self._dataKey
self._dataKey += 1
return r_value
def import_data_from_file(self, file_name):
""" File will have more than 4 columns, as X, Y, Z, Intensity, ...
:param file_name:
:return:
"""
# check
assert isinstance(file_name, str) and os.path.exists(file_name)
# parse
data_file = open(file_name, 'r')
raw_lines = data_file.readlines()
data_file.close()
# construct ND data array
xyz_points = np.zeros((len(raw_lines), 3))
intensities = np.zeros((len(raw_lines), ))
# parse
for i in range(len(raw_lines)):
line = raw_lines[i].strip()
# skip empty line
if len(line) == 0:
continue
# set value
terms = line.split(',')
for j in range(3):
xyz_points[i][j] = float(terms[j])
intensities[i] = float(terms[3])
# END-FOR
# Add to data structure for managing
self._dataDict[self._dataKey] = (xyz_points, intensities)
return_value = self._dataKey
self._dataKey += 1
return return_value
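    # Note: an input file for this method is expected to contain
    # comma-separated rows with at least four columns (X, Y, Z, intensity),
    # e.g. (illustrative values only):
    #   1.20,0.35,4.10,253.0
    #   1.25,0.40,4.05,198.5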
def plot_scatter(self, points, color_list):
"""
Plot points with colors in scatter mode
:param points:
:param color_list:
:return:
"""
# check: [TO DO] need MORE!
assert isinstance(points, np.ndarray)
assert len(points) == len(color_list)
assert points.shape[1] == 3, '3D data %s.' % str(points.shape)
#
# plot scatters
plt = self._myAxes.scatter(points[:, 0], points[:, 1], points[:, 2],
zdir='z', c=color_list)
self._currPlotList.append(plt)
self.draw()
return
def plot_scatter_auto(self, data_key, base_color=None):
"""
Plot data in scatter plot in an automatic mode
:param data_key: key to locate the data stored to this class
:param base_color: None or a list of 3 elements from 0 to 1 for RGB
:return:
"""
# Check
assert isinstance(data_key, int) and data_key >= 0
assert base_color is None or len(base_color) == 3
# get data and check
points = self._dataDict[data_key][0]
intensities = self._dataDict[data_key][1]
assert isinstance(points, np.ndarray)
assert isinstance(points.shape, tuple)
assert points.shape[1] == 3, '3D data %s.' % str(points.shape)
if len(points) > 1:
# set x, y and z limit
x_min = min(points[:, 0])
x_max = max(points[:, 0])
d_x = x_max - x_min
# print(x_min, x_max)
y_min = min(points[:, 1])
y_max = max(points[:, 1])
d_y = y_max - y_min
# print(y_min, y_max)
z_min = min(points[:, 2])
z_max = max(points[:, 2])
d_z = z_max - z_min
print(z_min, z_max)
# use default setup
self._myAxes.set_xlim(x_min-d_x, x_max+d_x)
self._myAxes.set_ylim(y_min-d_y, y_max+d_y)
self._myAxes.set_zlim(z_min-d_z, z_max+d_z)
# END-IF
# color map for intensity
color_list = list()
if base_color is None:
color_r = self._colorMap[0]
color_g = self._colorMap[1]
else:
color_r = base_color[0]
color_g = base_color[1]
if len(intensities) > 1:
min_intensity = min(intensities)
max_intensity = max(intensities)
diff = max_intensity - min_intensity
b_list = intensities - min_intensity
b_list = b_list/diff
num_points = len(points[:, 2])
for index in range(num_points):
color_tup = (color_r, color_g, b_list[index])
color_list.append(color_tup)
else:
color_list.append((color_r, color_g, 0.5))
# plot scatters
self._myAxes.scatter(points[:, 0], points[:, 1], points[:, 2], zdir='z', c=color_list)
self.draw()
def plot_surface(self):
"""
Plot surface
:return:
"""
print('Number of surf = ', len(self._currSurfaceList))
for surf in self._currSurfaceList:
plt = self._myAxes.plot_surface(surf["xx"], surf["yy"], surf["val"],
rstride=5, cstride=5, # color map??? cmap=cm.jet,
linewidth=1, antialiased=True)
self._currPlotList.append(plt)
# END-FOR
return
def report_pixel(self, x_d, y_d):
report = self.format_coord_org(x_d, y_d)
report = report.replace(",", " ")
return report
def set_axes_labels(self, x_label, y_label, z_label):
"""
:return:
"""
if x_label is not None:
self._myAxes.set_xlabel(x_label)
if y_label is not None:
self._myAxes.set_ylabel(y_label)
if z_label is not None:
self._myAxes.set_zlabel(z_label)
return
def set_color_map(self, color_r, color_g, color_b):
"""
Set the base line of color map
:param color_r:
:param color_g:
:param color_b:
:return:
"""
# Set color map
        assert isinstance(color_r, float) and 0 <= color_r < 1.
        assert isinstance(color_g, float) and 0 <= color_g < 1.
        assert isinstance(color_b, float) and 0 <= color_b < 1.
self._colorMap = [color_r, color_g, color_b]
def set_title(self, title, font_size):
"""
Set super title
:param title:
:return:
"""
self._myFigure.suptitle(title, fontsize=font_size)
return
def set_xyz_limits(self, points, limits=None):
""" Set XYZ axes limits
:param points:
:param limits: if None, then use default; otherwise, 3-tuple of 2-tuple
:return:
"""
# check
assert isinstance(points, np.ndarray)
# get limit
if limits is None:
limits = get_auto_xyz_limit(points)
# set limit to axes
self._myAxes.set_xlim(limits[0][0], limits[0][1])
self._myAxes.set_ylim(limits[1][0], limits[1][1])
self._myAxes.set_zlim(limits[2][0], limits[2][1])
return
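# Illustrative usage sketch (the parent widget and file name are hypothetical):
#   canvas = MplPlot3dCanvas(parent=some_qt_widget)
#   data_key = canvas.import_data_from_file('scan_points.csv')
#   points, _ = canvas.get_data(data_key)
#   canvas.set_xyz_limits(points)
#   canvas.plot_scatter_auto(data_key, base_color=[0.2, 0.4, 0.0])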
def get_auto_xyz_limit(points):
""" Get default limit on X, Y, Z
Requirements: number of data points must be larger than 0.
:param points:
:return: 3-tuple of 2-tuple as (min, max) for X, Y and Z respectively
"""
# check
assert isinstance(points, np.ndarray)
dim = points.shape[1]
assert dim == 3
# set x, y and z limit
x_min = min(points[:, 0])
x_max = max(points[:, 0])
d_x = x_max - x_min
# print(x_min, x_max)
y_min = min(points[:, 1])
y_max = max(points[:, 1])
d_y = y_max - y_min
# print(y_min, y_max)
z_min = min(points[:, 2])
z_max = max(points[:, 2])
d_z = z_max - z_min
print(z_min, z_max)
# use default setup
x_lim = (x_min-d_x, x_max+d_x)
y_lim = (y_min-d_y, y_max+d_y)
z_lim = (z_min-d_z, z_max+d_z)
return x_lim, y_lim, z_lim
| gpl-3.0 |
femtotrader/pyfolio | pyfolio/capacity.py | 1 | 9548 | from __future__ import division
import pandas as pd
import numpy as np
from . import pos
import empyrical
def daily_txns_with_bar_data(transactions, market_data):
"""
Sums the absolute value of shares traded in each name on each day.
Adds columns containing the closing price and total daily volume for
each day-ticker combination.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Contains "volume" and "price" DataFrames for the tickers
in the passed positions DataFrames
Returns
-------
txn_daily : pd.DataFrame
Daily totals for transacted shares in each traded name.
price and volume columns for close price and daily volume for
the corresponding ticker, respectively.
"""
transactions.index.name = 'date'
txn_daily = pd.DataFrame(transactions.assign(
amount=abs(transactions.amount)).groupby(
['symbol', pd.TimeGrouper('D')]).sum()['amount'])
txn_daily['price'] = market_data['price'].unstack()
txn_daily['volume'] = market_data['volume'].unstack()
txn_daily = txn_daily.reset_index().set_index('date')
return txn_daily
def days_to_liquidate_positions(positions, market_data,
max_bar_consumption=0.2,
capital_base=1e6,
mean_volume_window=5):
"""
Compute the number of days that would have been required
to fully liquidate each position on each day based on the
trailing n day mean daily bar volume and a limit on the proportion
of a daily bar that we are allowed to consume.
This analysis uses portfolio allocations and a provided capital base
rather than the dollar values in the positions DataFrame to remove the
effect of compounding on days to liquidate. In other words, this function
assumes that the net liquidation portfolio value will always remain
constant at capital_base.
Parameters
----------
positions: pd.DataFrame
Contains daily position values including cash
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
the passed positions DataFrame (same dates and symbols).
max_bar_consumption : float
Max proportion of a daily bar that can be consumed in the
process of liquidating a position.
capital_base : integer
Capital base multiplied by portfolio allocation to compute
position value that needs liquidating.
mean_volume_window : float
Trailing window to use in mean volume calculation.
Returns
-------
days_to_liquidate : pd.DataFrame
Number of days required to fully liquidate daily positions.
Datetime index, symbols as columns.
"""
DV = market_data['volume'] * market_data['price']
roll_mean_dv = DV.rolling(window=mean_volume_window,
center=False).mean().shift()
roll_mean_dv = roll_mean_dv.replace(0, np.nan)
positions_alloc = pos.get_percent_alloc(positions)
positions_alloc = positions_alloc.drop('cash', axis=1)
days_to_liquidate = (positions_alloc * capital_base) / \
(max_bar_consumption * roll_mean_dv)
return days_to_liquidate.iloc[mean_volume_window:]
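# Worked example (illustrative numbers only): with capital_base=1e6, a 1%
# allocation in a name is a $10,000 position; if the trailing mean daily
# dollar volume is $50,000 and max_bar_consumption=0.2, then
# (0.01 * 1e6) / (0.2 * 50000) = 1.0, i.e. roughly one day to liquidate.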
def get_max_days_to_liquidate_by_ticker(positions, market_data,
max_bar_consumption=0.2,
capital_base=1e6,
mean_volume_window=5,
last_n_days=None):
"""
Finds the longest estimated liquidation time for each traded
name over the course of backtest (or last n days of the backtest).
Parameters
----------
positions: pd.DataFrame
Contains daily position values including cash
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
the passed positions DataFrame (same dates and symbols).
max_bar_consumption : float
Max proportion of a daily bar that can be consumed in the
process of liquidating a position.
capital_base : integer
Capital base multiplied by portfolio allocation to compute
position value that needs liquidating.
mean_volume_window : float
Trailing window to use in mean volume calculation.
last_n_days : integer
Compute for only the last n days of the passed backtest data.
Returns
-------
days_to_liquidate : pd.DataFrame
Max Number of days required to fully liquidate each traded name.
Index of symbols. Columns for days_to_liquidate and the corresponding
date and position_alloc on that day.
"""
dtlp = days_to_liquidate_positions(positions, market_data,
max_bar_consumption=max_bar_consumption,
capital_base=capital_base,
mean_volume_window=mean_volume_window)
if last_n_days is not None:
dtlp = dtlp.loc[dtlp.index.max() - pd.Timedelta(days=last_n_days):]
pos_alloc = pos.get_percent_alloc(positions)
pos_alloc = pos_alloc.drop('cash', axis=1)
liq_desc = pd.DataFrame()
liq_desc['days_to_liquidate'] = dtlp.unstack()
liq_desc['pos_alloc_pct'] = pos_alloc.unstack() * 100
liq_desc.index.levels[0].name = 'symbol'
liq_desc.index.levels[1].name = 'date'
worst_liq = liq_desc.reset_index().sort_values(
'days_to_liquidate', ascending=False).groupby('symbol').first()
return worst_liq
def get_low_liquidity_transactions(transactions, market_data,
last_n_days=None):
"""
For each traded name, find the daily transaction total that consumed
the greatest proportion of available daily bar volume.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
the passed positions DataFrame (same dates and symbols).
last_n_days : integer
Compute for only the last n days of the passed backtest data.
"""
txn_daily_w_bar = daily_txns_with_bar_data(transactions, market_data)
txn_daily_w_bar.index.name = 'date'
txn_daily_w_bar = txn_daily_w_bar.reset_index()
if last_n_days is not None:
md = txn_daily_w_bar.date.max() - pd.Timedelta(days=last_n_days)
txn_daily_w_bar = txn_daily_w_bar[txn_daily_w_bar.date > md]
bar_consumption = txn_daily_w_bar.assign(
max_pct_bar_consumed=(
txn_daily_w_bar.amount/txn_daily_w_bar.volume)*100
).sort_values('max_pct_bar_consumed', ascending=False)
max_bar_consumption = bar_consumption.groupby('symbol').first()
return max_bar_consumption[['date', 'max_pct_bar_consumed']]
def apply_slippage_penalty(returns, txn_daily, simulate_starting_capital,
backtest_starting_capital, impact=0.1):
"""
Applies quadratic volumeshare slippage model to daily returns based
on the proportion of the observed historical daily bar dollar volume
consumed by the strategy's trades. Scales the size of trades based
on the ratio of the starting capital we wish to test to the starting
capital of the passed backtest data.
Parameters
----------
returns : pd.Series
Time series of daily returns.
    txn_daily : pd.DataFrame
        Daily transaction totals, closing price, and daily volume for
        each traded name. See daily_txns_with_bar_data for more details.
simulate_starting_capital : integer
capital at which we want to test
    backtest_starting_capital : integer
        Capital base at which the backtest was originally run.
    impact : float
        Scales the size of the slippage penalty (see the Zipline
        volume-share slippage model).
Returns
-------
adj_returns : pd.Series
Slippage penalty adjusted daily returns.
"""
mult = simulate_starting_capital / backtest_starting_capital
simulate_traded_shares = abs(mult * txn_daily.amount)
simulate_traded_dollars = txn_daily.price * simulate_traded_shares
simulate_pct_volume_used = simulate_traded_shares / txn_daily.volume
penalties = simulate_pct_volume_used**2 \
* impact * simulate_traded_dollars
daily_penalty = penalties.resample('D').sum()
daily_penalty = daily_penalty.reindex(returns.index).fillna(0)
# Since we are scaling the numerator of the penalties linearly
# by capital base, it makes the most sense to scale the denominator
# similarly. In other words, since we aren't applying compounding to
# simulate_traded_shares, we shouldn't apply compounding to pv.
portfolio_value = empyrical.cum_returns(
returns, starting_value=backtest_starting_capital) * mult
adj_returns = returns - (daily_penalty / portfolio_value)
return adj_returns
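# Worked example of the penalty above (illustrative numbers, not part of the
# original module): consuming 10% of a bar's volume with impact=0.1 on
# $1,000,000 of traded dollars costs 0.1**2 * 0.1 * 1,000,000 = $1,000 for
# that bar; penalties are resampled to daily, divided by portfolio value and
# subtracted from that day's return, e.g.
#   adj_returns = apply_slippage_penalty(returns, txn_daily,
#                                        simulate_starting_capital=1e7,
#                                        backtest_starting_capital=1e6)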
| apache-2.0 |
tudo-astroparticlephysics/pydisteval | disteval/visualization/comparison_plotter/functions/legend_entries.py | 1 | 3779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
class DataObject(object):
def __init__(self, fc_t='w', ec_t='k', fc_c='w', ec_c='k', lw=1.):
self.fc_t = fc_t
self.ec_t = ec_t
self.fc_c = fc_c
self.ec_c = ec_c
self.lw = lw
class data_handler(object):
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
scale = fontsize / 22
x0, y0 = handlebox.xdescent, handlebox.ydescent
width, height = handlebox.width, handlebox.height
radius = height / 2 * scale
xt_0 = x0 + width - height / 2
xc_0 = x0 + height / 2 + radius
yc_0 = y0 + height / 2 * (1 - scale) + radius
patch_tri = mpatches.RegularPolygon(
[xt_0, yc_0],
3,
radius=radius * 1.5,
orientation=np.pi,
facecolor=orig_handle.fc_t,
edgecolor=orig_handle.ec_t,
transform=handlebox.get_transform(),
linewidth=orig_handle.lw)
patch_circ = mpatches.Circle([xc_0, yc_0], radius,
facecolor=orig_handle.fc_c,
edgecolor=orig_handle.ec_c,
transform=handlebox.get_transform(),
linewidth=orig_handle.lw)
handlebox.add_artist(patch_tri)
handlebox.add_artist(patch_circ)
return patch_circ
class UncertObject(object):
def __init__(self, colors, linecolor):
self.colors = colors
self.linecolor = linecolor
class uncert_handler(object):
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
n_alphas = len(orig_handle.colors)
x0, y0 = handlebox.xdescent, handlebox.ydescent
x0 = x0 + 0.1
width, height = handlebox.width, handlebox.height
y_mid = y0 + height / 2
step_size = (0.5 * height) / n_alphas
for i, c in enumerate(orig_handle.colors[::-1]):
j = n_alphas - i
y0_i = y_mid - (j * step_size)
height_i = j * 2 * step_size
rec = mpatches.Rectangle([x0, y0_i], width,
height_i,
facecolor=c,
edgecolor=c,
transform=handlebox.get_transform(),
zorder=10)
handlebox.add_artist(rec)
line = plt.Line2D([x0, x0 + width],
[y_mid, y_mid],
color=orig_handle.linecolor,
linestyle='-',
linewidth=1)
handlebox.add_artist(line)
return line
class UncertObject_single(object):
def __init__(self, color):
self.color = color
class uncert_handler_single(object):
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0, y0 = handlebox.xdescent, handlebox.ydescent
width, height = handlebox.width, handlebox.height
x0 = x0 + 0.5 * width
rec = mpatches.Rectangle([x0, y0], 0.5 * width,
height,
facecolor=orig_handle.color,
edgecolor=orig_handle.color,
transform=handlebox.get_transform(),
zorder=10)
handlebox.add_artist(rec)
return rec
handler_mapper = {DataObject: data_handler(),
UncertObject: uncert_handler(),
UncertObject_single: uncert_handler_single()}
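# Minimal usage sketch (illustrative addition): the proxy objects above are
# passed as legend handles and drawn by the matching handler through
# matplotlib's ``handler_map`` mechanism, e.g.
#   fig, ax = plt.subplots()
#   handles = [DataObject(), UncertObject(['0.8', '0.6'], 'k')]
#   ax.legend(handles, ['Data', 'Uncertainty'], handler_map=handler_mapper)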
| mit |
jguhlin/nn-replicon-identification | replicon_identification_cnn.py | 1 | 17393 | # Next step is to add filename processed to text summary
import tensorflow as tf
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from collections import Counter
import collections
import random
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import Bio
from Bio import SeqIO
import os
import concurrent.futures
import functools
from functools import partial
import math
import threading
import time
import random
from random import shuffle
import pickle
import ntpath
import os.path
import sys
from tensorflow.python.summary import summary
# k-mer size to use
k = 9
#
# NOTE!!!!!!!!!!!!!!!!
#
# We can reduce problem space if we get the reverse complement, and add a bit to indicate reversed or not...
# Not really.... revcomp just doubles it back up again....
#
# Also -- Build a recurrent network to predict sequences that come after a given kmer?
# Look at word2vec, dna2vec, bag of words, skip-gram
#
# Problem space
space = 5 ** k
def partition(n, step, coll):
for i in range(0, len(coll), step):
if (i+n > len(coll)):
break # raise StopIteration...
yield coll[i:i+n]
def get_kmers(k):
return lambda sequence: partition(k, k, sequence)
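# Illustrative check of the windowing above (added comment): with k=3 the
# generator walks the sequence in non-overlapping steps of k and drops any
# trailing partial k-mer, e.g.
#   list(get_kmers(3)("ATCGATC")) -> ["ATC", "GAT"]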
def convert_nt(c):
return {"N": 0, "A": 1, "C": 2, "T": 3, "G": 4}.get(c, 0)
def convert_nt_complement(c):
return {"N": 0, "A": 3, "C": 4, "T": 1, "G": 2}.get(c, 0)
def convert_kmer_to_int(kmer):
return int(''.join(str(x) for x in (map(convert_nt, kmer))), 5)
def convert_kmer_to_int_complement(kmer):
return int(''.join(str(x) for x in reversed(list(map(convert_nt_complement, kmer)))), 5)
def convert_base5(n):
return {"0": "N", "1": "A", "2": "C", "3": "T", "4": "G"}.get(n,"N")
def convert_to_kmer(kmer):
return ''.join(map(convert_base5, str(np.base_repr(kmer, 5))))
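# Quick sanity check of the base-5 round trip (illustrative addition): with
# N, A, C, T, G -> 0..4, "ACT" encodes to the digits 1, 2, 3, i.e.
# 1*25 + 2*5 + 3 = 38, and decoding recovers the original k-mer.
assert convert_kmer_to_int("ACT") == 38
assert convert_to_kmer(38) == "ACT"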
# Not using sparse tensors anymore.
tf.logging.set_verbosity(tf.logging.INFO)
# Get all kmers, in order, with a sliding window of k (but sliding 1bp for each iteration up to k)
# Also get RC for all....
def kmer_processor(seq,offset):
return list(map(convert_kmer_to_int, get_kmers(k)(seq[offset:])))
def get_kmers_from_seq(sequence):
kmers_from_seq = list()
kp = functools.partial(kmer_processor, sequence)
for i in map(kp, range(0,k)):
kmers_from_seq.append(i)
rev = sequence[::-1]
kpr = functools.partial(kmer_processor, rev)
for i in map(kpr, range(0,k)):
kmers_from_seq.append(i)
# for i in range(0,k):
# kmers_from_seq.append(kmer_processor(sequence,i))
# for i in range(0,k):
# kmers_from_seq.append(kmer_processor(rev, i))
return kmers_from_seq
data = list()
def load_fasta(filename):
# tf.summary.text("File", tf.as_string(filename))
data = dict()
file_base_name = ntpath.basename(filename)
picklefilename = file_base_name + ".picklepickle"
if os.path.isfile(picklefilename):
print("Loading from pickle: " + filename)
data = pickle.load(open(picklefilename, "rb"))
else:
print("File not found, generating new sequence: " + picklefilename)
for seq_record in SeqIO.parse(filename, "fasta"):
data.update({seq_record.id:
get_kmers_from_seq(seq_record.seq.upper())})
pickle.dump(data, open(picklefilename, "wb"))
sys.stdout.flush()
return(data)
def get_kmers_from_file(filename):
kmer_list = list()
for seq_record in SeqIO.parse(filename, "fasta"):
kmer_list.extend(get_kmers_from_seq(seq_record.seq.upper()))
return set([item for sublist in kmer_list for item in sublist])
all_kmers = set()
# Very slow, should make this part concurrent...
def find_all_kmers(directory):
kmer_master_list = list()
files = [directory + "/" + f for f in os.listdir(directory)]
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
for i in executor.map(get_kmers_from_file, files):
kmer_master_list.extend(list(i))
kmer_master_list = list(set(kmer_master_list))
print("Total unique kmers: " + str(len(set(kmer_master_list))))
return set(kmer_master_list)
def get_categories(directory):
data = list()
files = os.listdir(directory)
for filename in files:
for seq_record in SeqIO.parse(directory + "/" + filename, "fasta"):
data.append(seq_record.id)
data = sorted(list(set(data)))
return(data)
def training_file_generator(directory):
files = [directory + "/" + f for f in os.listdir(directory)]
random.shuffle(files)
def gen():
nonlocal files
if (len(files) == 0):
files = [directory + "/" + f for f in os.listdir(directory)]
random.shuffle(files)
return(files.pop())
return gen
def gen_random_training_data(input_data, window_size):
rname = random.choice(list(input_data.keys()))
rdata = random.choice(input_data[rname])
idx = random.randrange(window_size + 1, len(rdata) - window_size - 1)
tdata = list();
for i in range(idx - window_size - 1, idx + window_size):
if (i < 0): continue
if (i >= len(rdata)): break
if type(rdata[idx]) == list: break;
if type(rdata[i]) == list: break
tdata.append(kmer_dict[rdata[i]])
return tdata, rname
# The current state is, each training batch is from a single FASTA file (strain, usually)
# This can be ok, as long as training batch is a large number
# Need to speed up reading of FASTA files though, maybe pyfaidx or something?
# Define the one-hot dictionary...
replicons_list = get_categories("training-files/")
oh = dict()
a = 0
for i in replicons_list:
oh[i] = tf.one_hot(a, len(replicons_list))
a += 1
oh = dict()
a = 0
for i in replicons_list:
oh[i] = a
a += 1
oh = dict()
oh['Main'] = [1.0, 0.0, 0.0]
oh['pSymA'] = [0.0, 1.0, 0.0]
oh['pSymB'] = [0.0, 0.0, 1.0]
def generate_training_batch(data, batch_size, window_size):
training_batch_data = list();
while len(training_batch_data) < batch_size:
training_batch_data.append(gen_random_training_data(data,
window_size))
return training_batch_data
batch_size = 1024
embedding_size = 128
window_size = 7
replicons_list = get_categories("training-files/")
filegen = training_file_generator("training-files/")
repdict = dict()
a = 0
for i in replicons_list:
repdict[i] = a
a += 1
def test_input_fn(data):
tbatch = generate_training_batch(data, 1, window_size)
dat = {'x': tf.convert_to_tensor([tf.convert_to_tensor(get_kmer_embeddings(tbatch[0][0]))])}
lab = tf.convert_to_tensor([repdict[tbatch[0][1]]])
return dat, lab
def train_input_fn_raw(data):
tbatch = generate_training_batch(data, 1, window_size)
dat = {'x': (get_kmer_embeddings(tbatch[0][0]))}
lab = [repdict[tbatch[0][1]]]
return dat, lab
# Because this was run at work on a smaller sample of files....
# with open("all_kmers_subset.txt", "w") as f:
# for s in all_kmers:
# f.write(str(s) +"\n")
sess = tf.Session()
# Because this was run at work on a smaller sample of files....
all_kmers = list()
# with open("all_kmers_subset.txt", "r") as f:
# for line in f:
# all_kmers.append(int(line.strip()))
all_kmers = pickle.load(open("all_kmers.p", "rb"))
all_kmers = set(all_kmers)
len(all_kmers)
# len(data)
# all_kmers = set([item for sublist in data for item in sublist])
unused_kmers = set(range(0, space)) - all_kmers
kmer_dict = dict()
reverse_kmer_dict = dict();
a = 0
for i in all_kmers:
kmer_dict[i] = a
reverse_kmer_dict[a] = i
a += 1
kmer_count = len(all_kmers)
[len(all_kmers), len(unused_kmers), space]
# This fn now generates all possible combinations of training data....
def gen_training_data(input_data, window_size):
total_data = list()
for k in input_data.keys():
for kdata in input_data[k]:
for i in range(window_size + 1, len(kdata) - window_size):
kentry = list()
for x in range(i - window_size - 1, i + window_size):
kentry.append(kmer_dict[kdata[x]])
total_data.append([kentry, k])
return total_data
feature_columns = [tf.feature_column.numeric_column("x", shape=[15,128])]
embeddings = np.load("final_embeddings.npy")
def get_kmer_embeddings(kmers):
a = list() # numpy.empty(128 * 15)
for k in kmers:
a.append(embeddings[k])
return a
#return np.hstack(a)
def gen_training_data_generator(input_data, window_size, repdict):
for k in input_data.keys():
for kdata in input_data[k]:
for i in range(window_size + 1, len(kdata) - window_size):
kentry = list()
for x in range(i - window_size - 1, i + window_size):
kentry.append(kmer_dict[kdata[x]])
yield(get_kmer_embeddings(kentry), [repdict[k]])
# Not infinite
def kmer_generator(directory, window_size):
files = [directory + "/" + f for f in os.listdir(directory)]
random.shuffle(files)
replicons_list = get_categories("training-files/")
repdict = dict()
a = 0
for i in replicons_list:
repdict[i] = a
a += 1
for f in files:
yield from gen_training_data_generator(load_fasta(f), window_size, repdict)
# Plan to use tf.data.Dataset.from_generator
# ds = tf.contrib.data.Dataset.list_files("training-files/").map(tf_load_fasta)
def my_input_fn():
kmer_gen = functools.partial(kmer_generator, "training-files/", window_size)
ds = tf.data.Dataset.from_generator(kmer_gen,
(tf.float32,
tf.int64),
(tf.TensorShape([15,128]),
tf.TensorShape(None)))
# Numbers reduced to run on my desktop
ds = ds.repeat(2)
ds = ds.prefetch(400000)
ds = ds.shuffle(buffer_size=200000)
ds = ds.batch(5000)
# ds = ds.repeat(1)
# ds = ds.prefetch(1000)
# ds = ds.shuffle(buffer_size=500)
# ds = ds.batch(250)
def add_labels(arr, lab):
return({"x": arr}, lab)
ds = ds.map(add_labels)
iterator = ds.make_one_shot_iterator()
batch_features, batch_labels = iterator.get_next()
return batch_features, batch_labels
#next_batch = my_input_fn()
# with tf.Session() as sess:
# first_batch = sess.run(next_batch)
# print(first_batch)
# nn = tf.estimator.DNNClassifier(feature_columns=feature_columns,
# hidden_units = [256, 128, len(replicons_list) + 10],
# activation_fn=tf.nn.tanh,
# dropout=0.1,
# model_dir="classifier",
# n_classes=len(replicons_list),
# optimizer="Momentum")
# Have to know the names of the tensors to do this level of logging....
# Custom estimator would allow it....
# tensors_to_log = {"probabilities": "softmax_tensor"}
# logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=100)
# nn.train(input_fn = my_input_fn)
# Trained on l1 and l2 as 0.001 and learning_rate 0.1
# Changing learning rate to 0.2 for additional run
# dnn = tf.estimator.DNNClassifier(feature_columns=feature_columns,
# hidden_units = [256, 45],
# activation_fn=tf.nn.relu,
# dropout=0.05,
# model_dir="classifier.relu.dropout0.05.proximaladagrad.lrecoli",
# n_classes=len(replicons_list),
# optimizer=tf.train.ProximalAdagradOptimizer(
# learning_rate=0.2,
# l1_regularization_strength=0.001,
# l2_regularization_strength=0.001))
#acc = dnn.evaluate(input_fn = my_input_fn, steps=1000)
#print("Accuracy: " + acc["accuracy"] + "\n");
#print("Loss: %s" % acc["loss"])
#print("Root Mean Squared Error: %s" % acc["rmse"])
# dnn.train(input_fn = my_input_fn)
# CNN experiment for training
# Based off of kmer embeddings
def _add_layer_summary(value, tag):
summary.scalar('%s/fraction_of_zero_values' % tag, tf.nn.zero_fraction(value))
summary.histogram('%s/activation' % tag, value)
# Based off of: https://www.tensorflow.org/tutorials/layers
def cnn_model_fn(features, labels, mode):
"""Model fn for CNN"""
# Input layer
# So inputs are 1920, or 15 * 128, and "1" deep (which is a float)
input_layer = tf.reshape(features["x"], [-1, 15, 128, 1])
    # filters * kernel_size[0] * kernel_size[1] must be > input_layer_size
# So 1920 <= 32 * 5 * 12
# 32 dimensions, 5 x 12 sliding window over entire dataset
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5,12],
padding="same",
activation=tf.nn.relu)
#conv1_img = tf.unstack(conv1, axis=3)
#tf.summary.image("Visualize_conv1", conv1_img)
    # Our input to pool1 is 15 x 128 x 32 now....
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, padding="same")
# So output is....
# -1 8 x 64 x 32
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# conv2_img = tf.expand_dims(tf.unstack(conv2, axis=3), axis=3)
# tf.summary.image("Visualize_conv2", conv2_img)
    # So output here is -1, 8, 64, 64 ("same" padding keeps the spatial size)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# pool2 reduces by half again
# So -1, 4, 32, 64
pool2_flat = tf.reshape(pool2, [-1, 4 * 32 * 64])
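    # Shape walk-through (added for clarity): the [15, 128, 1] input keeps its
    # spatial size through conv1 ("same" padding) -> [15, 128, 32]; pool1 with
    # "same" padding halves it -> [8, 64, 32]; conv2 keeps [8, 64, 64]; pool2
    # (default "valid") halves again -> [4, 32, 64], hence the 4 * 32 * 64
    # features flattened above.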
# 1,024 neurons
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
_add_layer_summary(dense, "Dense")
    # Gonna try this but dropout is very high (rate=0.4)
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Must have len(replicons_list) neurons
logits = tf.layers.dense(inputs=dropout, units=len(replicons_list))
_add_layer_summary(logits, "Logits")
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=len(replicons_list))
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#f.summary.text("LogitsArgMax", tf.as_string(tf.argmax(logits, 1)))
#tf.summary.text("Labels", tf.as_string(labels))
#tf.summary.text("Prediction", tf.as_string(tf.argmax(labels, 1)))
# tf.summary.text("Onehot", tf.as_string(onehot_labels))
# tf.summary.text("Predictions", tf.as_string(correct_prediction))
tf.summary.scalar('Accuracy', accuracy)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
eval_metric_ops=eval_metric_ops,
summary_op=tf.summary.merge_all())
def main(unused_argv):
classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn,
model_dir="classifier_cnn_firsttry",
config=tf.contrib.learn.RunConfig(
save_checkpoints_steps=1500,
save_checkpoints_secs=None,
save_summary_steps=300))
# tensors_to_log = {"probabilities": "softmax_tensor"}
# logging_hook = tf.train.LoggingTensorHook(
# tensors=tensors_to_log, every_n_iter=50)
classifier.train(input_fn=my_input_fn)
# steps=10000
#hooks=[logging_hook])
eval_results = classifier.evaluate(input_fn=my_input_fn, steps=1000)
print(eval_results)
if __name__ == "__main__":
tf.app.run()
| epl-1.0 |
dgasmith/EEX_scratch | tests/test_amber_reference.py | 1 | 1942 | """
Tests to compare EEX energies to reference energies computed by amber
"""
import os
import eex
import numpy as np
import pandas as pd
import pytest
import eex_find_files
import glob
import eex_build_dl
# Build out file list
_test_directories = ["alkanes", "alcohols", "cyclic"]
_test_systems = []
for test_dir in _test_directories:
print(test_dir)
test_dir = eex_find_files.get_example_filename("amber", test_dir)
systems = glob.glob(test_dir + "/*.prmtop")
for s in systems:
path, file_name = os.path.split(s)
# Grab test dir and system name
_test_systems.append((path, file_name))
# List current energy tests
_energy_types = {"two-body" : "bond", "three-body": "angle", "four-body": "dihedral"}
def test_references(amber_references):
test_dir, system_name = amber_references
molecule = str(system_name.split("/")[-1]).split(".")[0]
data, dl = eex_build_dl.build_dl("amber", test_dir, molecule)
dl_energies = dl.evaluate(utype='kcal * mol ** -1')
reference_file = eex_find_files.get_example_filename("amber", test_dir, "energies.csv")
reference_energies = pd.read_csv(reference_file, header=0)
reference = reference_energies.loc[reference_energies['molecule'] == molecule]
for k in _energy_types:
k_reference = reference[_energy_types[k]].get_values()[0]
        # Test against reference values in CSV -- absolute tolerance of 0.001 is used since amber only
# outputs energies to third decimal place
success = dl_energies[k] == pytest.approx(k_reference, abs=0.001)
if not success:
raise ValueError("AMBER Test failed, energy type %s.\n"
" Test description: %s" % (k, molecule))
# Loop over amber test directories
@pytest.fixture(scope="module", params=_test_systems)
def amber_references(request):
test_dir, test_system = request.param
return (test_dir, test_system)
| bsd-3-clause |
hrjn/scikit-learn | sklearn/linear_model/ransac.py | 16 | 19158 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
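# Worked example of the formula above (illustrative comment): with 80 inliers
# out of 100 samples, min_samples=2 and probability=0.99,
#   N >= log(1 - 0.99) / log(1 - 0.8**2) = log(0.01) / log(0.36) ~= 4.51,
# so _dynamic_max_trials(80, 100, 2, 0.99) returns 5.0.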
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
max_skips : int, optional
Maximum number of iterations that can be skipped due to finding zero
inliers or invalid data defined by ``is_data_valid`` or invalid models
defined by ``is_model_valid``.
.. versionadded:: 0.19
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
        samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20
Use ``loss`` instead.
loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
n_skips_no_inliers_ : int
Number of iterations skipped due to finding zero inliers.
.. versionadded:: 0.19
n_skips_invalid_data_ : int
Number of iterations skipped due to invalid data defined by
``is_data_valid``.
.. versionadded:: 0.19
n_skips_invalid_model_ : int
Number of iterations skipped due to an invalid model defined by
``is_model_valid``.
.. versionadded:: 0.19
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100, max_skips=np.inf,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.max_skips = max_skips
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' was deprecated in version 0.18 and "
"will be removed in version 0.20. Use 'loss' instead.",
DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 1
score_best = -np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
self.n_skips_no_inliers_ = 0
self.n_skips_invalid_data_ = 0
self.n_skips_invalid_model_ = 0
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
break
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
self.n_skips_invalid_data_ += 1
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
self.n_skips_invalid_model_ += 1
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
self.n_skips_no_inliers_ += 1
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
if ((self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips):
raise ValueError(
"RANSAC skipped more iterations than `max_skips` without"
" finding a valid consensus set. Iterations were skipped"
" because each randomly chosen sub-sample failed the"
" passing criteria. See estimator attributes for"
" diagnostics (n_skips*).")
else:
raise ValueError(
"RANSAC could not find a valid consensus set. All"
" `max_trials` iterations were skipped because each"
" randomly chosen sub-sample failed the passing criteria."
" See estimator attributes for diagnostics (n_skips*).")
else:
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
warnings.warn("RANSAC found a valid consensus set but exited"
" early due to skipping more iterations than"
" `max_skips`. See estimator attributes for"
" diagnostics (n_skips*).",
UserWarning)
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
vivekmishra1991/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
CodeMonkeyJan/hyperspy | hyperspy/drawing/utils.py | 1 | 47617 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import itertools
import textwrap
from traits import trait_base
from mpl_toolkits.axes_grid1 import make_axes_locatable
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import hyperspy as hs
from distutils.version import LooseVersion
import logging
from hyperspy.defaults_parser import preferences
_logger = logging.getLogger(__name__)
def contrast_stretching(data, saturated_pixels):
"""Calculate bounds that leaves out a given percentage of the data.
Parameters
----------
data: numpy array
saturated_pixels: scalar
The percentage of pixels that are left out of the bounds. For example,
the low and high bounds of a value of 1 are the 0.5% and 99.5%
percentiles. It must be in the [0, 100] range.
Returns
-------
vmin, vmax: scalar
The low and high bounds
Raises
------
ValueError if the value of `saturated_pixels` is out of the valid range.
"""
# Sanity check
if not 0 <= saturated_pixels <= 100:
raise ValueError(
"saturated_pixels must be a scalar in the range[0, 100]")
nans = np.isnan(data)
if nans.any():
data = data[~nans]
vmin = np.percentile(data, saturated_pixels / 2.)
vmax = np.percentile(data, 100 - saturated_pixels / 2.)
return vmin, vmax
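# Illustrative example (added comment): with ``saturated_pixels=1`` the bounds
# are the 0.5th and 99.5th percentiles, so contrast_stretching(np.arange(1000), 1)
# returns approximately (4.995, 994.005), leaving out ~1% of the pixels.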
MPL_DIVERGING_COLORMAPS = [
"BrBG",
"bwr",
"coolwarm",
"PiYG",
"PRGn",
"PuOr",
"RdBu",
"RdGy",
"RdYIBu",
"RdYIGn",
"seismic",
"Spectral", ]
# Add reversed colormaps
MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS]
def centre_colormap_values(vmin, vmax):
"""Calculate vmin and vmax to set the colormap midpoint to zero.
Parameters
----------
vmin, vmax : scalar
The range of data to display.
Returns
-------
cvmin, cvmax : scalar
The values to obtain a centre colormap.
"""
absmax = max(abs(vmin), abs(vmax))
return -absmax, absmax
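# Example (added comment): centre_colormap_values(-1, 3) returns (-3, 3), so
# zero sits at the midpoint of a diverging colormap.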
def create_figure(window_title=None,
_on_figure_window_close=None,
**kwargs):
"""Create a matplotlib figure.
This function adds the possibility to execute another function
when the figure is closed and to easily set the window title. Any
keyword argument is passed to the plt.figure function
Parameters
----------
window_title : string
_on_figure_window_close : function
Returns
-------
fig : plt.figure
"""
fig = plt.figure(**kwargs)
if window_title is not None:
fig.canvas.set_window_title(window_title)
if _on_figure_window_close is not None:
on_figure_window_close(fig, _on_figure_window_close)
return fig
def on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
def function_wrapper(evt):
function()
figure.canvas.mpl_connect('close_event', function_wrapper)
def plot_RGB_map(im_list, normalization='single', dont_plot=False):
"""Plot 2 or 3 maps in RGB.
Parameters
----------
im_list : list of Signal2D instances
normalization : {'single', 'global'}
dont_plot : bool
Returns
-------
array: RGB matrix
"""
# from widgets import cursors
height, width = im_list[0].data.shape[:2]
rgb = np.zeros((height, width, 3))
rgb[:, :, 0] = im_list[0].data.squeeze()
rgb[:, :, 1] = im_list[1].data.squeeze()
if len(im_list) == 3:
rgb[:, :, 2] = im_list[2].data.squeeze()
if normalization == 'single':
for i in range(len(im_list)):
rgb[:, :, i] /= rgb[:, :, i].max()
elif normalization == 'global':
rgb /= rgb.max()
rgb = rgb.clip(0, rgb.max())
if not dont_plot:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.frameon = False
ax.set_axis_off()
ax.imshow(rgb, interpolation='nearest')
# cursors.set_mpl_ax(ax)
figure.canvas.draw_idle()
else:
return rgb
def subplot_parameters(fig):
"""Returns a list of the subplot parameters of a mpl figure.
Parameters
----------
fig : mpl figure
Returns
-------
tuple : (left, bottom, right, top, wspace, hspace)
"""
wspace = fig.subplotpars.wspace
hspace = fig.subplotpars.hspace
left = fig.subplotpars.left
right = fig.subplotpars.right
top = fig.subplotpars.top
bottom = fig.subplotpars.bottom
return left, bottom, right, top, wspace, hspace
class ColorCycle:
_color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color
in ('b', 'g', 'r', 'c', 'm', 'y', 'k')]
def __init__(self):
self.color_cycle = copy.copy(self._color_cycle)
def __call__(self):
if not self.color_cycle:
self.color_cycle = copy.copy(self._color_cycle)
return self.color_cycle.pop(0)
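# Usage sketch (added comment): each call pops the next colour and the cycle
# refills itself once exhausted, e.g.
#   cycle = ColorCycle()
#   first = cycle()   # RGBA tuple for 'b'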
def plot_signals(signal_list, sync=True, navigator="auto",
navigator_list=None, **kwargs):
"""Plot several signals at the same time.
Parameters
----------
signal_list : list of BaseSignal instances
If sync is set to True, the signals must have the
same navigation shape, but not necessarily the same signal shape.
sync : True or False, default "True"
If True: the signals will share navigation, all the signals
must have the same navigation shape for this to work, but not
necessarily the same signal shape.
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, default "auto"
See signal.plot docstring for full description
navigator_list : {List of navigator arguments, None}, default None
Set different navigator options for the signals. Must use valid
navigator arguments: "auto", None, "spectrum", "slider", or a
hyperspy Signal. The list must have the same size as signal_list.
If None, the argument specified in navigator will be used.
**kwargs
Any extra keyword arguments are passed to each signal `plot` method.
Example
-------
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll])
Specifying the navigator:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider")
Specifying the navigator for each signal:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> s_edx = hs.load("edx.dm3")
>>> s_adf = hs.load("adf.dm3")
>>> hs.plot.plot_signals(
[s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf])
"""
import hyperspy.signal
if navigator_list:
if not (len(signal_list) == len(navigator_list)):
raise ValueError(
"signal_list and navigator_list must"
" have the same size")
if sync:
axes_manager_list = []
for signal in signal_list:
axes_manager_list.append(signal.axes_manager)
if not navigator_list:
navigator_list = []
if navigator is None:
navigator_list.extend([None] * len(signal_list))
elif navigator is "slider":
navigator_list.append("slider")
navigator_list.extend([None] * (len(signal_list) - 1))
elif isinstance(navigator, hyperspy.signal.BaseSignal):
navigator_list.append(navigator)
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator is "spectrum":
navigator_list.extend(["spectrum"] * len(signal_list))
elif navigator is "auto":
navigator_list.extend(["auto"] * len(signal_list))
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
# Check to see if the spectra have the same navigational shapes
temp_shape_first = axes_manager_list[0].navigation_shape
for i, axes_manager in enumerate(axes_manager_list):
temp_shape = axes_manager.navigation_shape
if not (temp_shape_first == temp_shape):
raise ValueError(
"The spectra does not have the same navigation shape")
axes_manager_list[i] = axes_manager.deepcopy()
if i > 0:
for axis0, axisn in zip(axes_manager_list[0].navigation_axes,
axes_manager_list[i].navigation_axes):
axes_manager_list[i]._axes[axisn.index_in_array] = axis0
del axes_manager
for signal, navigator, axes_manager in zip(signal_list,
navigator_list,
axes_manager_list):
signal.plot(axes_manager=axes_manager,
navigator=navigator,
**kwargs)
# If sync is False
else:
if not navigator_list:
navigator_list = []
navigator_list.extend([navigator] * len(signal_list))
for signal, navigator in zip(signal_list, navigator_list):
signal.plot(navigator=navigator,
**kwargs)
def _make_heatmap_subplot(spectra):
from hyperspy._signals.signal2d import Signal2D
im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts())
im.metadata.General.title = spectra.metadata.General.title
im.plot()
return im._plot.signal_plot.ax
def _make_overlap_plot(spectra, ax, color="blue", line_style='-'):
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_ylabel('Intensity')
ax.autoscale(tight=True)
def _make_cascade_subplot(
spectra, ax, color="blue", line_style='-', padding=1):
max_value = 0
for spectrum in spectra:
spectrum_yrange = (np.nanmax(spectrum.data) -
np.nanmin(spectrum.data))
if spectrum_yrange > max_value:
max_value = spectrum_yrange
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
data_to_plot = ((spectrum.data - spectrum.data.min()) /
float(max_value) + spectrum_index * padding)
ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_yticks([])
ax.autoscale(tight=True)
def _plot_spectrum(spectrum, ax, color="blue", line_style='-'):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
def _set_spectrum_xlabel(spectrum, ax):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units))
def plot_images(images,
cmap=None,
no_nans=False,
per_row=3,
label='auto',
labelwrap=30,
suptitle=None,
suptitle_fontsize=18,
colorbar='multi',
centre_colormap="auto",
saturated_pixels=0,
scalebar=None,
scalebar_color='white',
axes_decor='all',
padding=None,
tight_layout=False,
aspect='auto',
min_asp=0.1,
namefrac_thresh=0.4,
fig=None,
vmin=None,
vmax=None,
*args,
**kwargs):
"""Plot multiple images as sub-images in one figure.
Parameters
----------
images : list
`images` should be a list of Signals (Images) to plot
If any signal is not an image, a ValueError will be raised
multi-dimensional images will have each plane plotted as a separate
image
cmap : matplotlib colormap, optional
The colormap used for the images, by default read from pyplot
no_nans : bool, optional
If True, set nans to zero for plotting.
per_row : int, optional
The number of plots in each row
label : None, str, or list of str, optional
Control the title labeling of the plotted images.
If None, no titles will be shown.
If 'auto' (default), function will try to determine suitable titles
using Signal2D titles, falling back to the 'titles' option if no good
short titles are detected.
Works best if all images to be plotted have the same beginning
to their titles.
If 'titles', the title from each image's metadata.General.title
will be used.
If any other single str, images will be labeled in sequence using
that str as a prefix.
If a list of str, the list elements will be used to determine the
labels (repeated, if necessary).
labelwrap : int, optional
integer specifying the number of characters that will be used on
one line
If the function returns an unexpected blank figure, lower this
value to reduce overlap of the labels between each figure
suptitle : str, optional
Title to use at the top of the figure. If called with label='auto',
this parameter will override the automatically determined title.
suptitle_fontsize : int, optional
Font size to use for super title at top of figure
colorbar : {'multi', None, 'single'}
Controls the type of colorbars that are plotted.
If None, no colorbar is plotted.
If 'multi' (default), individual colorbars are plotted for each
(non-RGB) image
If 'single', all (non-RGB) images are plotted on the same scale,
and one colorbar is shown for all
centre_colormap : {"auto", True, False}
If True the centre of the color scheme is set to zero. This is
specially useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
saturated_pixels: scalar
The percentage of pixels that are left out of the bounds. For
example, the low and high bounds of a value of 1 are the 0.5% and
99.5% percentiles. It must be in the [0, 100] range.
scalebar : {None, 'all', list of ints}, optional
If None (or False), no scalebars will be added to the images.
If 'all', scalebars will be added to all images.
If list of ints, scalebars will be added to each image specified.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
padding : None or dict, optional
This parameter controls the spacing between images.
If None, default options will be used
Otherwise, supply a dictionary with the spacing options as
keywords and desired values as values
Values should be supplied as used in pyplot.subplots_adjust(),
and can be:
'left', 'bottom', 'right', 'top', 'wspace' (width),
and 'hspace' (height)
tight_layout : bool, optional
If true, hyperspy will attempt to improve image placement in
figure using matplotlib's tight_layout
If false, repositioning images inside the figure will be left as
an exercise for the user.
aspect : str or numeric, optional
If 'auto', aspect ratio is auto determined, subject to min_asp.
If 'square', image will be forced onto square display.
If 'equal', aspect ratio of 1 will be enforced.
If float (or int/long), given value will be used.
min_asp : float, optional
Minimum aspect ratio to be used when plotting images
namefrac_thresh : float, optional
Threshold to use for auto-labeling. This parameter controls how
much of the titles must be the same for the auto-shortening of
labels to activate. Can vary from 0 to 1. Smaller values
encourage shortening of titles by auto-labeling, while larger
        values will require more overlap in titles before activating the
auto-label code.
fig : mpl figure, optional
If set, the images will be plotted to an existing MPL figure
vmin, vmax : scalar or list of scalar, optional, default: None
If list of scalar, the length should match the number of images to
show.
A list of scalar is not compatible with a single colorbar.
See vmin, vmax of matplotlib.imshow() for more details.
*args, **kwargs, optional
Additional arguments passed to matplotlib.imshow()
Returns
-------
axes_list : list
a list of subplot axes that hold the images
See Also
--------
plot_spectra : Plotting of multiple spectra
plot_signals : Plotting of multiple signals
plot_histograms : Compare signal histograms
Notes
-----
`interpolation` is a useful parameter to provide as a keyword
argument to control how the space between pixels is interpolated. A
value of ``'nearest'`` will cause no interpolation between pixels.
`tight_layout` is known to be quite brittle, so an option is provided
to disable it. Turn this option off if output is not as expected,
or try adjusting `label`, `labelwrap`, or `per_row`
"""
from hyperspy.drawing.widgets import ScaleBar
from hyperspy.misc import rgb_tools
from hyperspy.signal import BaseSignal
    if isinstance(images, BaseSignal) and len(images) == 1:
images.plot()
ax = plt.gca()
return ax
elif not isinstance(images, (list, tuple, BaseSignal)):
raise ValueError("images must be a list of image signals or "
"multi-dimensional signal."
" " + repr(type(images)) + " was given.")
# Get default colormap from pyplot:
if cmap is None:
cmap = plt.get_cmap().name
elif isinstance(cmap, mpl.colors.Colormap):
cmap = cmap.name
if centre_colormap == "auto":
if cmap in MPL_DIVERGING_COLORMAPS:
centre_colormap = True
else:
centre_colormap = False
# If input is >= 1D signal (e.g. for multi-dimensional plotting),
# copy it and put it in a list so labeling works out as (x,y) when plotting
if isinstance(images,
BaseSignal) and images.axes_manager.navigation_dimension > 0:
images = [images._deepcopy_with_new_data(images.data)]
n = 0
for i, sig in enumerate(images):
if sig.axes_manager.signal_dimension != 2:
raise ValueError("This method only plots signals that are images. "
"The signal dimension must be equal to 2. "
"The signal at position " + repr(i) +
" was " + repr(sig) + ".")
# increment n by the navigation size, or by 1 if the navigation size is
# <= 0
n += (sig.axes_manager.navigation_size
if sig.axes_manager.navigation_size > 0
else 1)
if isinstance(vmin, list):
if len(vmin) != n:
_logger.warning('The provided vmin values are ignored because the '
'length of the list does not match the number of '
'images')
vmin = [None] * n
else:
vmin = [vmin] * n
if isinstance(vmax, list):
if len(vmax) != n:
_logger.warning('The provided vmax values are ignored because the '
'length of the list does not match the number of '
'images')
vmax = [None] * n
else:
vmax = [vmax] * n
# Sort out the labeling:
div_num = 0
all_match = False
shared_titles = False
user_labels = False
if label is None:
pass
    elif label == 'auto':
# Use some heuristics to try to get base string of similar titles
label_list = [x.metadata.General.title for x in images]
# Find the shortest common string between the image titles
# and pull that out as the base title for the sequence of images
# array in which to store arrays
res = np.zeros((len(label_list), len(label_list[0]) + 1))
res[:, 0] = 1
# j iterates the strings
for j in range(len(label_list)):
# i iterates length of substring test
for i in range(1, len(label_list[0]) + 1):
# stores whether or not characters in title match
res[j, i] = label_list[0][:i] in label_list[j]
# sum up the results (1 is True, 0 is False) and create
# a substring based on the minimum value (this will be
# the "smallest common string" between all the titles
if res.all():
basename = label_list[0]
div_num = len(label_list[0])
all_match = True
else:
div_num = int(min(np.sum(res, 1)))
basename = label_list[0][:div_num - 1]
all_match = False
# trim off any '(' or ' ' characters at end of basename
if div_num > 1:
while True:
if basename[len(basename) - 1] == '(':
basename = basename[:-1]
elif basename[len(basename) - 1] == ' ':
basename = basename[:-1]
else:
break
# namefrac is ratio of length of basename to the image name
# if it is high (e.g. over 0.5), we can assume that all images
# share the same base
if len(label_list[0]) > 0:
namefrac = float(len(basename)) / len(label_list[0])
else:
# If label_list[0] is empty, it means there was probably no
# title set originally, so nothing to share
namefrac = 0
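        # A worked example (hypothetical titles): for 'EELS map (1)' and
        # 'EELS map (2)' the common basename is 'EELS map', namefrac is
        # roughly 0.7 and the titles are treated as shared; unrelated
        # titles give a small namefrac and fall back to the 'titles' mode.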
if namefrac > namefrac_thresh:
# there was a significant overlap of label beginnings
shared_titles = True
# only use new suptitle if one isn't specified already
if suptitle is None:
suptitle = basename
else:
# there was not much overlap, so default back to 'titles' mode
shared_titles = False
label = 'titles'
div_num = 0
    elif label == 'titles':
# Set label_list to each image's pre-defined title
label_list = [x.metadata.General.title for x in images]
elif isinstance(label, str):
# Set label_list to an indexed list, based off of label
label_list = [label + " " + repr(num) for num in range(n)]
elif isinstance(label, list) and all(
isinstance(x, str) for x in label):
label_list = label
user_labels = True
# If list of labels is longer than the number of images, just use the
# first n elements
if len(label_list) > n:
del label_list[n:]
if len(label_list) < n:
label_list *= (n // len(label_list)) + 1
del label_list[n:]
else:
raise ValueError("Did not understand input of labels.")
# Determine appropriate number of images per row
rows = int(np.ceil(n / float(per_row)))
if n < per_row:
per_row = n
# Set overall figure size and define figure (if not pre-existing)
if fig is None:
k = max(plt.rcParams['figure.figsize']) / max(per_row, rows)
f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows))))
else:
f = fig
# Initialize list to hold subplot axes
axes_list = []
# Initialize list of rgb tags
isrgb = [False] * len(images)
# Check to see if there are any rgb images in list
# and tag them using the isrgb list
for i, img in enumerate(images):
if rgb_tools.is_rgbx(img.data):
isrgb[i] = True
# Determine how many non-rgb Images there are
non_rgb = list(itertools.compress(images, [not j for j in isrgb]))
    if len(non_rgb) == 0 and colorbar is not None:
colorbar = None
warnings.warn("Sorry, colorbar is not implemented for RGB images.")
# Find global min and max values of all the non-rgb images for use with
# 'single' scalebar
    if colorbar == 'single':
g_vmin, g_vmax = contrast_stretching(np.concatenate(
[i.data.flatten() for i in non_rgb]), saturated_pixels)
if isinstance(vmin, list):
_logger.warning('vmin have to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmin = vmin if vmin is not None else g_vmin
if isinstance(vmax, list):
_logger.warning('vmax have to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmax = vmax if vmax is not None else g_vmax
if centre_colormap:
g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax)
# Check if we need to add a scalebar for some of the images
if isinstance(scalebar, list) and all(isinstance(x, int)
for x in scalebar):
scalelist = True
else:
scalelist = False
idx = 0
ax_im_list = [0] * len(isrgb)
# Loop through each image, adding subplot for each one
for i, ims in enumerate(images):
# Get handles for the signal axes and axes_manager
axes_manager = ims.axes_manager
if axes_manager.navigation_dimension > 0:
ims = ims._deepcopy_with_new_data(ims.data)
for j, im in enumerate(ims):
idx += 1
ax = f.add_subplot(rows, per_row, idx)
axes_list.append(ax)
data = im.data
# Enable RGB plotting
if rgb_tools.is_rgbx(data):
data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
l_vmin, l_vmax = None, None
else:
data = im.data
# Find min and max for contrast
l_vmin, l_vmax = contrast_stretching(data, saturated_pixels)
l_vmin = vmin[idx - 1] if vmin[idx - 1] is not None else l_vmin
l_vmax = vmax[idx - 1] if vmax[idx - 1] is not None else l_vmax
if centre_colormap:
l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax)
# Remove NaNs (if requested)
if no_nans:
data = np.nan_to_num(data)
# Get handles for the signal axes and axes_manager
axes_manager = im.axes_manager
axes = axes_manager.signal_axes
# Set dimensions of images
xaxis = axes[0]
yaxis = axes[1]
extent = (
xaxis.low_value,
xaxis.high_value,
yaxis.high_value,
yaxis.low_value,
)
if not isinstance(aspect, (int, float)) and aspect not in [
'auto', 'square', 'equal']:
print('Did not understand aspect ratio input. '
'Using \'auto\' as default.')
aspect = 'auto'
            if aspect == 'auto':
if float(yaxis.size) / xaxis.size < min_asp:
factor = min_asp * float(xaxis.size) / yaxis.size
elif float(yaxis.size) / xaxis.size > min_asp ** -1:
factor = min_asp ** -1 * float(xaxis.size) / yaxis.size
else:
factor = 1
asp = np.abs(factor * float(xaxis.scale) / yaxis.scale)
            elif aspect == 'square':
asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2])
            elif aspect == 'equal':
asp = 1
elif isinstance(aspect, (int, float)):
asp = aspect
if 'interpolation' not in kwargs.keys():
kwargs['interpolation'] = 'nearest'
# Plot image data, using vmin and vmax to set bounds,
# or allowing them to be set automatically if using individual
# colorbars
            if colorbar == 'single' and not isrgb[i]:
axes_im = ax.imshow(data,
cmap=cmap, extent=extent,
vmin=g_vmin, vmax=g_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
else:
axes_im = ax.imshow(data,
cmap=cmap,
extent=extent,
vmin=l_vmin,
vmax=l_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
            # If any axis trait is undefined, do not set axis labels:
if isinstance(xaxis.units, trait_base._Undefined) or \
isinstance(yaxis.units, trait_base._Undefined) or \
isinstance(xaxis.name, trait_base._Undefined) or \
isinstance(yaxis.name, trait_base._Undefined):
                if axes_decor == 'all':
warnings.warn(
'Axes labels were requested, but one '
'or both of the '
'axes units and/or name are undefined. '
'Axes decorations have been set to '
'\'ticks\' instead.')
axes_decor = 'ticks'
# If all traits are defined, set labels as appropriate:
else:
ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")")
ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")")
if label:
if all_match:
title = ''
elif shared_titles:
title = label_list[i][div_num - 1:]
else:
if len(ims) == n:
# This is true if we are plotting just 1
# multi-dimensional Signal2D
title = label_list[idx - 1]
elif user_labels:
title = label_list[idx - 1]
else:
title = label_list[i]
if ims.axes_manager.navigation_size > 1 and not user_labels:
title += " %s" % str(ims.axes_manager.indices)
ax.set_title(textwrap.fill(title, labelwrap))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
# If using independent colorbars, add them
            if colorbar == 'multi' and not isrgb[i]:
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(axes_im, cax=cax)
# Add scalebars as necessary
            if (scalelist and idx - 1 in scalebar) or scalebar == 'all':
ax.scalebar = ScaleBar(
ax=ax,
units=axes[0].units,
color=scalebar_color,
)
# If using a single colorbar, add it, and do tight_layout, ensuring that
# a colorbar is only added based off of non-rgb Images:
    if colorbar == 'single':
foundim = None
for i in range(len(isrgb)):
if (not isrgb[i]) and foundim is None:
foundim = i
if foundim is not None:
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8])
f.colorbar(ax_im_list[foundim], cax=cbar_ax)
if tight_layout:
# tight_layout, leaving room for the colorbar
plt.tight_layout(rect=[0, 0, 0.9, 1])
elif tight_layout:
plt.tight_layout()
elif tight_layout:
plt.tight_layout()
# Set top bounds for shared titles and add suptitle
if suptitle:
f.subplots_adjust(top=0.85)
f.suptitle(suptitle, fontsize=suptitle_fontsize)
# If we want to plot scalebars, loop through the list of axes and add them
if scalebar is None or scalebar is False:
# Do nothing if no scalebars are called for
pass
    elif scalebar == 'all':
# scalebars were taken care of in the plotting loop
pass
elif scalelist:
# scalebars were taken care of in the plotting loop
pass
else:
raise ValueError("Did not understand scalebar input. Must be None, "
"\'all\', or list of ints.")
# Adjust subplot spacing according to user's specification
if padding is not None:
plt.subplots_adjust(**padding)
return axes_list
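# A minimal usage sketch for plot_images (hypothetical file names; `hs` is
# assumed to be the hyperspy.api module):
#
#     import hyperspy.api as hs
#     imgs = [hs.load(f) for f in ('map1.hspy', 'map2.hspy')]
#     hs.plot.plot_images(imgs, cmap='RdYlBu_r', colorbar='single',
#                         label='auto', scalebar='all', axes_decor='off')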
def set_axes_decor(ax, axes_decor):
    if axes_decor == 'off':
        ax.axis('off')
    elif axes_decor == 'ticks':
        ax.set_xlabel('')
        ax.set_ylabel('')
    elif axes_decor == 'all':
pass
elif axes_decor is None:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
def plot_spectra(
spectra,
style='overlap',
color=None,
line_style=None,
padding=1.,
legend=None,
legend_picking=True,
legend_loc='upper right',
fig=None,
ax=None,
**kwargs):
"""Plot several spectra in the same figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
spectra : iterable object
Ordered spectra list to plot. If `style` is "cascade" or "mosaic"
        the spectra can have different sizes and axes.
style : {'overlap', 'cascade', 'mosaic', 'heatmap'}
The style of the plot.
color : matplotlib color or a list of them or `None`
Sets the color of the lines of the plots (no action on 'heatmap').
If a list, if its length is less than the number of spectra to plot,
the colors will be cycled. If `None`, use default matplotlib color
cycle.
line_style: matplotlib line style or a list of them or `None`
Sets the line style of the plots (no action on 'heatmap').
        The main line styles are '-', '--', 'steps', '-.', ':'.
        If a list whose length is less than the number of
        spectra to plot, line_style will be cycled.
        If `None`, use continuous lines, e.g. ('-', '--', 'steps', '-.', ':')
    padding : float, optional, default 1.0
        Option for "cascade". 1 guarantees that there is no overlapping.
However, in many cases a value between 0 and 1 can produce a tighter
plot without overlapping. Negative values have the same effect but
reverse the order of the spectra without reversing the order of the
colors.
legend: None or list of str or 'auto'
        If a list of strings, the legend for "cascade" or the title for
        "mosaic" is displayed. If 'auto', the title of each spectrum
        (metadata.General.title) is used.
legend_picking: bool
        If True, a spectrum can be toggled on and off by clicking on
the legended line.
legend_loc : str or int
This parameter controls where the legend is placed on the figure;
see the pyplot.legend docstring for valid values
fig : matplotlib figure or None
If None, a default figure will be created. Specifying fig will
not work for the 'heatmap' style.
ax : matplotlib ax (subplot) or None
If None, a default ax will be created. Will not work for 'mosaic'
or 'heatmap' style.
**kwargs
remaining keyword arguments are passed to matplotlib.figure() or
matplotlib.subplots(). Has no effect on 'heatmap' style.
Example
-------
>>> s = hs.load("some_spectra")
>>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5)
To save the plot as a png-file
>>> hs.plot.plot_spectra(s).figure.savefig("test.png")
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
import hyperspy.signal
def _reverse_legend(ax_, legend_loc_):
"""
Reverse the ordering of a matplotlib legend (to be more consistent
with the default ordering of plots in the 'cascade' and 'overlap'
        styles).
Parameters
----------
ax_: matplotlib axes
legend_loc_: str or int
This parameter controls where the legend is placed on the
figure; see the pyplot.legend docstring for valid values
"""
l = ax_.get_legend()
labels = [lb.get_text() for lb in list(l.get_texts())]
handles = l.legendHandles
ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_)
    # Before v1.3 the default would read the value from preferences.
if style == "default":
style = "overlap"
if color is not None:
if isinstance(color, str):
color = itertools.cycle([color])
elif hasattr(color, "__iter__"):
color = itertools.cycle(color)
else:
raise ValueError("Color must be None, a valid matplotlib color "
"string or a list of valid matplotlib colors.")
else:
if LooseVersion(mpl.__version__) >= "1.5.3":
color = itertools.cycle(
plt.rcParams['axes.prop_cycle'].by_key()["color"])
else:
color = itertools.cycle(plt.rcParams['axes.color_cycle'])
if line_style is not None:
if isinstance(line_style, str):
line_style = itertools.cycle([line_style])
elif hasattr(line_style, "__iter__"):
line_style = itertools.cycle(line_style)
else:
raise ValueError("line_style must be None, a valid matplotlib"
" line_style string or a list of valid matplotlib"
" line_style.")
else:
line_style = ['-'] * len(spectra)
if legend is not None:
if isinstance(legend, str):
if legend == 'auto':
legend = [spec.metadata.General.title for spec in spectra]
else:
raise ValueError("legend must be None, 'auto' or a list of"
" string")
elif hasattr(legend, "__iter__"):
legend = itertools.cycle(legend)
if style == 'overlap':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_overlap_plot(spectra,
ax,
color=color,
line_style=line_style,)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
if legend_picking is True:
animate_legend(figure=fig)
elif style == 'cascade':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_cascade_subplot(spectra,
ax,
color=color,
line_style=line_style,
padding=padding)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
elif style == 'mosaic':
default_fsize = plt.rcParams["figure.figsize"]
figsize = (default_fsize[0], default_fsize[1] * len(spectra))
fig, subplots = plt.subplots(
len(spectra), 1, figsize=figsize, **kwargs)
if legend is None:
legend = [legend] * len(spectra)
for spectrum, ax, color, line_style, legend in zip(
spectra, subplots, color, line_style, legend):
_plot_spectrum(spectrum, ax, color=color, line_style=line_style)
ax.set_ylabel('Intensity')
if legend is not None:
ax.set_title(legend)
if not isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
if isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
fig.tight_layout()
elif style == 'heatmap':
if not isinstance(spectra, hyperspy.signal.BaseSignal):
import hyperspy.utils
spectra = hyperspy.utils.stack(spectra)
with spectra.unfolded():
ax = _make_heatmap_subplot(spectra)
ax.set_ylabel('Spectra')
ax = ax if style != "mosaic" else subplots
return ax
def animate_legend(figure='last'):
"""Animate the legend of a figure.
    A spectrum can be toggled on and off by clicking on the legended line.
Parameters
----------
figure: 'last' | matplotlib.figure
If 'last' pick the last figure
Note
----
Code inspired from legend_picking.py in the matplotlib gallery
"""
if figure == 'last':
figure = plt.gcf()
ax = plt.gca()
else:
ax = figure.axes[0]
lines = ax.lines
lined = dict()
leg = ax.get_legend()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
figure.canvas.draw_idle()
figure.canvas.mpl_connect('pick_event', onpick)
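# A minimal usage sketch for animate_legend (a sketch; `spectra` is assumed to
# be an existing list of Signal1D objects). plot_spectra already calls this
# helper when legend_picking=True, so a manual call is only needed for plots
# built by hand:
#
#     ax = plot_spectra(spectra, legend='auto', legend_picking=False)
#     animate_legend(figure=ax.figure)  # clicking a legend entry toggles it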
def plot_histograms(signal_list,
bins='freedman',
range_bins=None,
color=None,
line_style=None,
legend='auto',
fig=None,
**kwargs):
"""Plot the histogram of every signal in the list in the same figure.
    This function creates a histogram for each signal and plots the list with
the `utils.plot.plot_spectra` function.
Parameters
----------
signal_list : iterable
Ordered spectra list to plot. If `style` is "cascade" or "mosaic"
        the spectra can have different sizes and axes.
bins : int or list or str, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
'freedman' : use the Freedman-diaconis rule to determine bins
'blocks' : use bayesian blocks for dynamic bin widths
range_bins : tuple or None, optional.
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
color : valid matplotlib color or a list of them or `None`, optional.
        Sets the color of the lines of the plots. If a list whose length is
        less than the number of spectra to plot, the colors will be cycled.
        If `None`, use the default matplotlib color cycle.
line_style: valid matplotlib line style or a list of them or `None`,
optional.
        The main line styles are '-', '--', 'steps', '-.', ':'.
        If a list whose length is less than the number of
        spectra to plot, line_style will be cycled.
        If `None`, use continuous lines, e.g. ('-', '--', 'steps', '-.', ':')
legend: None or list of str or 'auto', optional.
        Display a legend. If 'auto', the title of each spectrum
        (metadata.General.title) is used.
legend_picking: bool, optional.
        If True, a spectrum can be toggled on and off by clicking on
the legended line.
fig : matplotlib figure or None, optional.
If None, a default figure will be created.
**kwargs
other keyword arguments (weight and density) are described in
np.histogram().
Example
-------
Histograms of two random chi-square distributions
>>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100]))
>>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100]))
>>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2'])
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
hists = []
for obj in signal_list:
hists.append(obj.get_histogram(bins=bins,
range_bins=range_bins, **kwargs))
if line_style is None:
line_style = 'steps'
return plot_spectra(hists, style='overlap', color=color,
line_style=line_style, legend=legend, fig=fig)
| gpl-3.0 |
antiface/mne-python | examples/visualization/plot_topo_compare_conditions.py | 7 | 2375 | """
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and
auditory responses is created. Both conditions
are then accessed by their respective names to
create a sensor layout plot of the related
evoked responses.
"""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.io import Raw
from mne.viz import plot_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
# bad channels in raw.info['bads'] will be automatically excluded
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'yellow', 'green'
title = 'MNE sample data - left vs right (A/V combined)'
plot_topo(evokeds, color=colors, title=title)
conditions = [e.comment for e in evokeds]
for cond, col, pos in zip(conditions, colors, (0.025, 0.07)):
plt.figtext(0.99, pos, cond, color=col, fontsize=12,
horizontalalignment='right')
plt.show()
| bsd-3-clause |
nathancfox/autocatalytic-modeling | BurstBasicExperiment/BURST_Analysis.py | 1 | 5129 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as lines
pathprefix = 'AnalysisFigures/'
ts = pd.read_csv('BURST_testStatistics.csv', header=0)
e_mean = plt.figure(figsize=(32,18))
em_ax = e_mean.add_subplot(111)
em_ax.plot(ts['Burst'], ts['EMean'], 'ro', label='Enzyme Mean')
em_ax.set_xlim([0, 52])
em_ax.set_ylim([35, 45])
em_ax.set_yticks([35, 37, 39, 41, 43, 45])
em_ax.set_title('Mean Enzyme Population by Burst Size')
em_ax.set_xlabel('Burst Size')
em_ax.set_ylabel('Mean Enzyme Population')
em_ax.legend()
em_ax.grid(b=True)
e_mean.savefig(pathprefix + 'EMean.png', dpi=300)
s_mean = plt.figure(figsize=(32,18))
sm_ax = s_mean.add_subplot(111)
sm_ax.plot(ts['Burst'], ts['SMean'], 'bo', label='Substrate Mean')
sm_ax.set_xlim([0, 52])
sm_ax.set_ylim([0, 15])
sm_ax.set_yticks([0, 3, 6, 9, 12, 15])
sm_ax.set_title('Mean Substrate Population by Burst Size')
sm_ax.set_xlabel('Burst Size')
sm_ax.set_ylabel('Mean Substrate Population')
sm_ax.legend()
sm_ax.grid(b=True)
s_mean.savefig(pathprefix + 'SMean.png', dpi=300)
c_mean = plt.figure(figsize=(32,18))
cm_ax = c_mean.add_subplot(111)
cm_ax.plot(ts['Burst'], ts['CMean'], 'go', label='Complex Mean')
cm_ax.set_xlim([0, 52])
cm_ax.set_ylim([15, 25])
cm_ax.set_yticks([15, 17, 19, 21, 23, 25])
cm_ax.set_title('Mean Complex Population by Burst Size')
cm_ax.set_xlabel('Burst Size')
cm_ax.set_ylabel('Mean Complex Population')
cm_ax.legend()
cm_ax.grid(b=True)
c_mean.savefig(pathprefix + 'CMean.png', dpi=300)
totale_mean = plt.figure(figsize=(32, 18))
totem_ax = totale_mean.add_subplot(111)
totem_ax.plot(ts['Burst'], ts['EMean'] + ts['CMean'], 'ro',
label='Total E (E + C)')
totem_ax.set_xlim([0, 52])
totem_ax.set_ylim([50, 65])
totem_ax.set_yticks([50, 53, 56, 59, 62, 65])
totem_ax.set_title('Total Enzyme (Bound and Unbound)')
totem_ax.set_xlabel('Burst Size')
totem_ax.set_ylabel('Total Enzyme')
totem_ax.legend()
totem_ax.grid(b=True)
totale_mean.savefig(pathprefix + 'TotalEMean.png', dpi=300)
totals_mean = plt.figure(figsize=(32, 18))
totsm_ax = totals_mean.add_subplot(111)
totsm_ax.plot(ts['Burst'], ts['SMean'] + ts['CMean'], 'bo',
label='Total S (S + C)')
totsm_ax.set_xlim([0, 52])
totsm_ax.set_ylim([20, 30])
totsm_ax.set_yticks([20, 22, 24, 26, 28, 30])
totsm_ax.set_title('Total Substrate (Bound and Unbound)')
totsm_ax.set_xlabel('Burst Size')
totsm_ax.set_ylabel('Total Substrate')
totsm_ax.legend()
totsm_ax.grid(b=True)
totals_mean.savefig(pathprefix + 'TotalSMean.png', dpi=300)
plt.close('all')
##############################################################################
e_var = plt.figure(figsize=(32,18))
ev_ax = e_var.add_subplot(111)
ev_ax.plot(ts['Burst'], ts['EVar'], 'r^', label='Enzyme Variance')
ev_ax.set_xlim([0, 52])
ev_ax.set_ylim([0, 1000])
ev_ax.set_yticks([0, 200, 400, 600, 800, 1000])
ev_ax.set_title('Enzyme Population Variance by Burst Size')
ev_ax.set_xlabel('Burst Size')
ev_ax.set_ylabel('Enzyme Population Variance')
ev_ax.legend()
ev_ax.grid(b=True)
e_var.savefig(pathprefix + 'EVariance.png', dpi=300)
s_var = plt.figure(figsize=(32,18))
sv_ax = s_var.add_subplot(111)
sv_ax.plot(ts['Burst'], ts['SVar'], 'b^', label='Substrate Variance')
sv_ax.set_xlim([0, 52])
sv_ax.set_ylim([0, 600])
sv_ax.set_yticks([0, 100, 200, 300, 400, 500, 600])
sv_ax.set_title('Substrate Population Variance by Burst Size')
sv_ax.set_xlabel('Burst Size')
sv_ax.set_ylabel('Substrate Population Variance')
sv_ax.legend()
sv_ax.grid(b=True)
s_var.savefig(pathprefix + 'SVariance.png', dpi=300)
c_var = plt.figure(figsize=(32,18))
cv_ax = c_var.add_subplot(111)
cv_ax.plot(ts['Burst'], ts['CVar'], 'g^', label='Complex Variance')
cv_ax.set_xlim([0, 52])
cv_ax.set_ylim([0, 500])
cv_ax.set_yticks([0, 100, 200, 300, 400, 500])
cv_ax.set_title('Complex Population Variance by Burst Size')
cv_ax.set_xlabel('Burst Size')
cv_ax.set_ylabel('Complex Population Variance')
cv_ax.legend()
cv_ax.grid(b=True)
c_var.savefig(pathprefix + 'CVariance.png', dpi=300)
totale_var = plt.figure(figsize=(32, 18))
totev_ax = totale_var.add_subplot(111)
totev_ax.plot(ts['Burst'], ts['EVar'] + ts['CVar'], 'r^',
label='Total E (E + C) Variance')
totev_ax.set_xlim([0, 52])
totev_ax.set_ylim([0, 1400])
totev_ax.set_yticks([0, 200, 400, 600, 800, 1000, 1200, 1400])
totev_ax.set_title('Total Enzyme Variance (Bound and Unbound)')
totev_ax.set_xlabel('Burst Size')
totev_ax.set_ylabel('Total Enzyme Variance')
totev_ax.legend()
totev_ax.grid(b=True)
totale_var.savefig(pathprefix + 'TotalEVariance.png', dpi=300)
totals_var = plt.figure(figsize=(32, 18))
totsv_ax = totals_var.add_subplot(111)
totsv_ax.plot(ts['Burst'], ts['SVar'] + ts['CVar'], 'b^',
label='Total S (S + C) Variance')
totsv_ax.set_xlim([0, 52])
totsv_ax.set_ylim([0, 1000])
totsv_ax.set_yticks([0, 200, 400, 600, 800, 1000])
totsv_ax.set_title('Total Substrate (Bound and Unbound) Variance')
totsv_ax.set_xlabel('Burst Size')
totsv_ax.set_ylabel('Total Substrate Variance')
totsv_ax.legend()
totsv_ax.grid(b=True)
totals_var.savefig(pathprefix + 'TotalSVariance.png', dpi=300)
plt.close('all')
| mit |
yousrabk/mne-python | examples/preprocessing/plot_eog_artifact_histogram.py | 11 | 1465 | """
========================
Show EOG artifact timing
========================
Compute the distribution of timing for EOG artifacts.
"""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
events = mne.find_events(raw, 'STI 014')
eog_event_id = 512
eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id)
raw.add_events(eog_events, 'STI 014')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=True, eog=False)
tmin, tmax = -0.2, 0.5
event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks)
# Get the stim channel data
pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0]
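# The blink events were added to the stim channel above with code 512, so the
# bitwise mask below counts, for every time sample, how many epochs contain a
# blink at that sample.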
data = epochs.get_data()[:, pick_ch, :].astype(int)
data = np.sum((data.astype(int) & 512) == 512, axis=0)
###############################################################################
# Plot EOG artifact distribution
plt.stem(1e3 * epochs.times, data)
plt.xlabel('Times (ms)')
plt.ylabel('Blink counts (from %s trials)' % len(epochs))
plt.show()
| bsd-3-clause |
tongwang01/tensorflow | tensorflow/contrib/learn/python/learn/estimators/classifier_test.py | 16 | 5175 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.session_bundle import manifest_pb2
def iris_input_fn(num_epochs=None):
iris = tf.contrib.learn.datasets.load_iris()
features = tf.train.limit_epochs(
tf.reshape(tf.constant(iris.data), [-1, 4]), num_epochs=num_epochs)
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def logistic_model_fn(features, labels, unused_mode):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_params_fn(features, labels, unused_mode, params):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
class ClassifierTest(tf.test.TestCase):
def testIrisAll(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
self._runIrisAll(est)
def testIrisAllWithParams(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_params_fn,
n_classes=3,
params={'learning_rate': 0.01})
self._runIrisAll(est)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(input_fn=iris_input_fn, steps=100)
est.evaluate(input_fn=iris_input_fn, steps=1, name='eval')
predict_input_fn = functools.partial(iris_input_fn, num_epochs=1)
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertEqual(len(predictions), iris.target.shape[0])
def _runIrisAll(self, est):
iris = tf.contrib.learn.datasets.load_iris()
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(x=iris.data, y=iris.target, name='eval')
predictions = list(est.predict(x=iris.data))
predictions_proba = list(est.predict_proba(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
self.assertAllEqual(predictions, np.argmax(predictions_proba, axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions)
self.assertAllClose(other_score, scores['accuracy'])
def _get_default_signature(self, export_meta_filename):
"""Gets the default signature from the export.meta file."""
with tf.Session():
save = tf.train.import_meta_graph(export_meta_filename)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
signatures_any = collection_def['serving_signatures'].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
return default_signature
# Disable this test case until b/31032996 is fixed.
def _testExportMonitorRegressionSignature(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
export_dir = tempfile.mkdtemp() + 'export/'
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=1,
signature_fn=tf.contrib.learn.classifier.classification_signature_fn)
est.fit(iris.data, iris.target, steps=2, monitors=[export_monitor])
self.assertTrue(tf.gfile.Exists(export_dir))
self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
self.assertTrue(tf.gfile.Exists(export_dir + '00000002/export'))
# Validate the signature
signature = self._get_default_signature(export_dir + '00000002/export.meta')
self.assertTrue(signature.HasField('classification_signature'))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
cbertinato/pandas | pandas/compat/numpy/function.py | 1 | 14392 | """
For compatibility with numpy libraries, pandas functions or
methods have to accept '*args' and '**kwargs' parameters to
accommodate numpy arguments that are not actually used or
respected in the pandas implementation.
To ensure that users do not abuse these parameters, validation
is performed in 'validators.py' to make sure that any extra
parameters passed correspond ONLY to those in the numpy signature.
Part of that validation includes whether or not the user attempted
to pass in non-default values for these extraneous parameters. As we
want to discourage users from relying on these parameters when calling
the pandas implementation, we want them only to pass in the default values
for these parameters.
This module provides a set of commonly used default arguments for functions
and methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from collections import OrderedDict
from distutils.version import LooseVersion
from typing import Any, Dict, Optional, Union
from numpy import __version__ as _np_version, ndarray
from pandas._libs.lib import is_bool, is_integer
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
validate_args, validate_args_and_kwargs, validate_kwargs)
class CompatValidator:
def __init__(self, defaults, fname=None, method=None,
max_fname_arg_count=None):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(self, args, kwargs, fname=None,
max_fname_arg_count=None, method=None):
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (self.max_fname_arg_count if
max_fname_arg_count is None
else max_fname_arg_count)
method = self.method if method is None else method
if method == 'args':
validate_args(fname, args, max_fname_arg_count, self.defaults)
elif method == 'kwargs':
validate_kwargs(fname, kwargs, self.defaults)
elif method == 'both':
validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
self.defaults)
else:
raise ValueError("invalid validation method "
"'{method}'".format(method=method))
ARGMINMAX_DEFAULTS = dict(out=None)
validate_argmin = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmin',
method='both', max_fname_arg_count=1)
validate_argmax = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmax',
method='both', max_fname_arg_count=1)
def process_skipna(skipna, args):
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
ARGSORT_DEFAULTS = OrderedDict() \
# type: OrderedDict[str, Optional[Union[int, str]]]
ARGSORT_DEFAULTS['axis'] = -1
ARGSORT_DEFAULTS['kind'] = 'quicksort'
ARGSORT_DEFAULTS['order'] = None
if LooseVersion(_np_version) >= LooseVersion("1.17.0"):
# GH-26361. NumPy added radix sort and changed default to None.
ARGSORT_DEFAULTS['kind'] = None
validate_argsort = CompatValidator(ARGSORT_DEFAULTS, fname='argsort',
max_fname_arg_count=0, method='both')
# two different signatures of argsort, this second validation
# for when the `kind` param is supported
ARGSORT_DEFAULTS_KIND = OrderedDict() \
# type: OrderedDict[str, Optional[int]]
ARGSORT_DEFAULTS_KIND['axis'] = -1
ARGSORT_DEFAULTS_KIND['order'] = None
validate_argsort_kind = CompatValidator(ARGSORT_DEFAULTS_KIND, fname='argsort',
max_fname_arg_count=0, method='both')
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
CLIP_DEFAULTS = dict(out=None)  # type: Dict[str, Any]
validate_clip = CompatValidator(CLIP_DEFAULTS, fname='clip',
method='both', max_fname_arg_count=3)
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
parameter in its signature is 'out', which can takes an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
COMPRESS_DEFAULTS = OrderedDict() # type: OrderedDict[str, Any]
COMPRESS_DEFAULTS['axis'] = None
COMPRESS_DEFAULTS['out'] = None
validate_compress = CompatValidator(COMPRESS_DEFAULTS, fname='compress',
method='both', max_fname_arg_count=1)
CUM_FUNC_DEFAULTS = OrderedDict() # type: OrderedDict[str, Any]
CUM_FUNC_DEFAULTS['dtype'] = None
CUM_FUNC_DEFAULTS['out'] = None
validate_cum_func = CompatValidator(CUM_FUNC_DEFAULTS, method='both',
max_fname_arg_count=1)
validate_cumsum = CompatValidator(CUM_FUNC_DEFAULTS, fname='cumsum',
method='both', max_fname_arg_count=1)
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
ALLANY_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[bool]]
ALLANY_DEFAULTS['dtype'] = None
ALLANY_DEFAULTS['out'] = None
ALLANY_DEFAULTS['keepdims'] = False
validate_all = CompatValidator(ALLANY_DEFAULTS, fname='all',
method='both', max_fname_arg_count=1)
validate_any = CompatValidator(ALLANY_DEFAULTS, fname='any',
method='both', max_fname_arg_count=1)
LOGICAL_FUNC_DEFAULTS = dict(out=None, keepdims=False)
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method='kwargs')
MINMAX_DEFAULTS = dict(out=None, keepdims=False)
validate_min = CompatValidator(MINMAX_DEFAULTS, fname='min',
method='both', max_fname_arg_count=1)
validate_max = CompatValidator(MINMAX_DEFAULTS, fname='max',
method='both', max_fname_arg_count=1)
RESHAPE_DEFAULTS = dict(order='C') # type: Dict[str, str]
validate_reshape = CompatValidator(RESHAPE_DEFAULTS, fname='reshape',
method='both', max_fname_arg_count=1)
REPEAT_DEFAULTS = dict(axis=None) # type: Dict[str, Any]
validate_repeat = CompatValidator(REPEAT_DEFAULTS, fname='repeat',
method='both', max_fname_arg_count=1)
ROUND_DEFAULTS = dict(out=None) # type: Dict[str, Any]
validate_round = CompatValidator(ROUND_DEFAULTS, fname='round',
method='both', max_fname_arg_count=1)
SORT_DEFAULTS = OrderedDict() \
# type: OrderedDict[str, Optional[Union[int, str]]]
SORT_DEFAULTS['axis'] = -1
SORT_DEFAULTS['kind'] = 'quicksort'
SORT_DEFAULTS['order'] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname='sort',
method='kwargs')
STAT_FUNC_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[Any]]
STAT_FUNC_DEFAULTS['dtype'] = None
STAT_FUNC_DEFAULTS['out'] = None
PROD_DEFAULTS = SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
SUM_DEFAULTS['keepdims'] = False
SUM_DEFAULTS['initial'] = None
MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
MEDIAN_DEFAULTS['overwrite_input'] = False
MEDIAN_DEFAULTS['keepdims'] = False
STAT_FUNC_DEFAULTS['keepdims'] = False
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS,
method='kwargs')
validate_sum = CompatValidator(SUM_DEFAULTS, fname='sum',
method='both', max_fname_arg_count=1)
validate_prod = CompatValidator(PROD_DEFAULTS, fname="prod",
method="both", max_fname_arg_count=1)
validate_mean = CompatValidator(STAT_FUNC_DEFAULTS, fname='mean',
method='both', max_fname_arg_count=1)
validate_median = CompatValidator(MEDIAN_DEFAULTS, fname='median',
method='both', max_fname_arg_count=1)
STAT_DDOF_FUNC_DEFAULTS = OrderedDict() \
# type: OrderedDict[str, Optional[bool]]
STAT_DDOF_FUNC_DEFAULTS['dtype'] = None
STAT_DDOF_FUNC_DEFAULTS['out'] = None
STAT_DDOF_FUNC_DEFAULTS['keepdims'] = False
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS,
method='kwargs')
TAKE_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[str]]
TAKE_DEFAULTS['out'] = None
TAKE_DEFAULTS['mode'] = 'raise'
validate_take = CompatValidator(TAKE_DEFAULTS, fname='take',
method='kwargs')
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method='both')
return convert
TRANSPOSE_DEFAULTS = dict(axes=None)
validate_transpose = CompatValidator(TRANSPOSE_DEFAULTS, fname='transpose',
method='both', max_fname_arg_count=0)
def validate_window_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .{func}() directly instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_rolling_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .rolling(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_expanding_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .expanding(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name)))
RESAMPLER_NUMPY_OPS = ('min', 'max', 'sum', 'prod',
'mean', 'std', 'var')
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with resample. Use .resample(...)."
"{func}() instead".format(func=method)))
else:
raise TypeError("too many arguments passed in")
def validate_minmax_axis(axis):
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
"""
ndim = 1 # hard-coded for Index
if axis is None:
return
if axis >= ndim or (axis < 0 and ndim + axis < 0):
raise ValueError("`axis` must be fewer than the number of "
"dimensions ({ndim})".format(ndim=ndim))
| bsd-3-clause |
harish-garg/Machine-Learning | udacity/evaluation_metrics/evaluate_accuracy.py | 1 | 1541 | #
# In this and the following exercises, you'll be adding train test splits to the data
# to see how it changes the performance of each classifier
#
# The code provided will load the Titanic dataset like you did in project 0, then train
# a decision tree (the method you used in your project) and a Bayesian classifier (as
# discussed in the introduction videos). You don't need to worry about how these work for
# now.
#
# What you do need to do is import a train/test split, train the classifiers on the
# training data, and store the resulting accuracy scores in the dictionary provided.
import numpy as np
import pandas as pd
# Load the dataset
X = pd.read_csv('titanic_data.csv')
# Limit to numeric data
X = X._get_numeric_data()
# Separate the labels
y = X['Survived']
# Remove labels from the inputs, and age due to missing data
del X['Age'], X['Survived']
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
# TODO: split the data into training and testing sets,
# using the standard settings for train_test_split.
# Then, train and test the classifiers with your newly split data instead of X and y.
# The decision tree classifier
clf1 = DecisionTreeClassifier()
clf1.fit(X,y)
print "Decision Tree has accuracy: ",accuracy_score(clf1.predict(X),y)
# The naive Bayes classifier
clf2 = GaussianNB()
clf2.fit(X,y)
print "GaussianNB has accuracy: ",accuracy_score(clf2.predict(X),y)
answer = {
"Naive Bayes Score": 0,
"Decision Tree Score": 0
}
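# One possible completion of the TODO above (left commented out as a sketch so
# the exercise itself stays unsolved; on older scikit-learn versions the
# import is `from sklearn.cross_validation import train_test_split`):
#
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y)
# clf1 = DecisionTreeClassifier().fit(X_train, y_train)
# clf2 = GaussianNB().fit(X_train, y_train)
# answer = {
#     "Naive Bayes Score": accuracy_score(y_test, clf2.predict(X_test)),
#     "Decision Tree Score": accuracy_score(y_test, clf1.predict(X_test))
# }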
| mit |
prasadtalasila/MailingListParser | test/integration_test/lib/analysis/author/test_curve_fiting.py | 1 | 5027 | from lib.analysis.author.curve_fitting import *
import unittest
import mock
def test_inv_func():
x = 5
a = 10
b = 25
c = 7
assert inv_func(x, a, b, c) == 10
def test_generate_crt_dist():
csv_filename = './test/integration_test/data/conversation_refresh_times.csv'
req_re_times = ([106.01, 272.03000000000003, 438.05000000000007, 604.07, 770.09, 936.1100000000001, 1102.13, 1268.15, 1434.17, 1600.19, 1766.21, 1932.23, 2098.25, 2264.2700000000004, 2430.29, 2596.3100000000004, 2762.33, 2928.3500000000004, 3094.37, 3260.3900000000003, 3426.41, 3592.4300000000003, 3758.45, 3924.4700000000003, 4090.4900000000002, 4256.51, 4422.530000000001, 4588.55, 4754.57, 4920.59, 5086.610000000001, 5252.63, 5418.650000000001, 5584.67, 5750.6900000000005, 5916.710000000001, 6082.7300000000005, 6248.75, 6414.77, 6580.790000000001, 6746.81, 6912.83, 7078.85, 7244.870000000001, 7410.89, 7576.91, 7742.93, 7908.950000000001, 8074.970000000001, 8240.99], [0.375, 0.0, 0.0, 0.0, 0.125, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.125, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.125, 0.0, 0.0, 0.0, 0.0, 0.0, 0.125, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.125])
    assert generate_crt_dist(csv_filename) == req_re_times
@mock.patch("matplotlib.pyplot.figure")
@mock.patch("matplotlib.pyplot.plot")
@mock.patch("matplotlib.pyplot.legend")
@mock.patch("matplotlib.pyplot.ylabel")
@mock.patch("matplotlib.pyplot.xlabel")
@mock.patch("matplotlib.pyplot.savefig")
def test_generate_crt_curve_fits(mock_figure, mock_plot, mock_legend, mock_ylabel, mock_xlabel, mock_savefig):
foldername = './test/integration_test/data/curve_fitting/'
generate_crt_curve_fits(foldername)
def test_generate_cl_dist():
csv_filename = './test/integration_test/data/conversation_length.csv'
req_lengths = ([7861.65, 22400.949999999997, 36940.25, 51479.549999999996, 66018.85, 80558.15, 95097.44999999998, 109636.75, 124176.05, 138715.35, 153254.65, 167793.94999999998, 182333.25, 196872.55, 211411.84999999998, 225951.15, 240490.44999999998, 255029.75, 269569.05000000005, 284108.35, 298647.65, 313186.94999999995, 327726.25, 342265.54999999993, 356804.85, 371344.15, 385883.44999999995, 400422.75, 414962.04999999993, 429501.35, 444040.65, 458579.94999999995, 473119.25, 487658.54999999993, 502197.85, 516737.15, 531276.45, 545815.75, 560355.05, 574894.35, 589433.6499999999, 603972.95, 618512.25, 633051.55, 647590.85, 662130.1499999999,676669.45, 691208.75, 705748.0499999999, 720287.35], [0.4444444444444444, 0.0, 0.0, 0.2222222222222222, 0.0, 0.0, 0.1111111111111111, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1111111111111111, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1111111111111111])
assert generate_cl_dist(csv_filename) == req_lengths
@mock.patch("matplotlib.pyplot.figure")
@mock.patch("matplotlib.pyplot.plot")
@mock.patch("matplotlib.pyplot.legend")
@mock.patch("matplotlib.pyplot.ylabel")
@mock.patch("matplotlib.pyplot.xlabel")
@mock.patch("matplotlib.pyplot.savefig")
def test_generate_cl_curve_fits(mock_figure, mock_plot, mock_legend, mock_ylabel, mock_xlabel, mock_savefig):
foldername = './test/integration_test/data/curve_fitting/'
generate_cl_curve_fits(foldername)
def test_generate_rt_dist():
csv_filename = './test/integration_test/data/response_time.csv'
req_times = ([783.76, 1441.28, 2098.8, 2756.3199999999997, 3413.84, 4071.3599999999997, 4728.879999999999, 5386.4, 6043.92, 6701.4400000000005, 7358.959999999999, 8016.48, 8674.0, 9331.52, 9989.039999999999, 10646.56, 11304.08, 11961.6, 12619.119999999999, 13276.64, 13934.16, 14591.68, 15249.199999999999, 15906.72, 16564.239999999998, 17221.760000000002, 17879.28, 18536.8, 19194.32, 19851.839999999997, 20509.36, 21166.879999999997, 21824.4, 22481.92, 23139.440000000002, 23796.96, 24454.48, 25112.0, 25769.519999999997, 26427.04, 27084.559999999998, 27742.08, 28399.6, 29057.12, 29714.64, 30372.159999999996, 31029.68, 31687.199999999997, 32344.72, 33002.24], [0.1111111111111111, 0.2222222222222222, 0.2222222222222222, 0.0, 0.1111111111111111, 0.2222222222222222, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1111111111111111])
assert generate_rt_dist(csv_filename) == req_times
@mock.patch("matplotlib.pyplot.figure")
@mock.patch("matplotlib.pyplot.plot")
@mock.patch("matplotlib.pyplot.legend")
@mock.patch("matplotlib.pyplot.ylabel")
@mock.patch("matplotlib.pyplot.xlabel")
@mock.patch("matplotlib.pyplot.savefig")
def test_generate_rt_curve_fits(mock_figure, mock_plot, mock_legend, mock_ylabel, mock_xlabel, mock_savefig):
foldername = './test/integration_test/data/curve_fitting/'
generate_rt_curve_fits(foldername)
| gpl-3.0 |
kmike/scikit-learn | examples/covariance/plot_sparse_cov.py | 4 | 5035 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD Style
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import pylab as pl
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
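# Standardize the time series: as noted in the docstring above, with a small
# number of observations it is easier to recover a correlation matrix than a
# covariance matrix.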
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
pl.figure(figsize=(10, 6))
pl.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
pl.subplot(2, 4, i + 1)
pl.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=pl.cm.RdBu_r)
pl.xticks(())
pl.yticks(())
pl.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = pl.subplot(2, 4, i + 5)
pl.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=pl.cm.RdBu_r)
pl.xticks(())
pl.yticks(())
pl.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
pl.figure(figsize=(4, 3))
pl.axes([.2, .15, .75, .7])
pl.plot(model.cv_alphas_, np.mean(model.cv_scores, axis=1), 'o-')
pl.axvline(model.alpha_, color='.5')
pl.title('Model selection')
pl.ylabel('Cross-validation score')
pl.xlabel('alpha')
pl.show()
| bsd-3-clause |
sureshthalamati/spark | examples/src/main/python/sql/arrow.py | 13 | 3997 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
# $example on:dataframe_with_arrow$
import numpy as np
import pandas as pd
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
# $example off:dataframe_with_arrow$
print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
# $example on:scalar_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a, b):
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
# Execute function as a Spark vectorized UDF
df.select(multiply(col("x"), col("x"))).show()
# +-------------------+
# |multiply_func(x, x)|
# +-------------------+
# | 1|
# | 4|
# | 9|
# +-------------------+
# $example off:scalar_pandas_udf$
def grouped_map_pandas_udf_example(spark):
# $example on:grouped_map_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
    def subtract_mean(pdf):
# pdf is a pandas.DataFrame
v = pdf.v
return pdf.assign(v=v - v.mean())
df.groupby("id").apply(substract_mean).show()
# +---+----+
# | id| v|
# +---+----+
# | 1|-0.5|
# | 1| 0.5|
# | 2|-3.0|
# | 2|-1.0|
# | 2| 4.0|
# +---+----+
# $example off:grouped_map_pandas_udf$
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Arrow-in-Spark example") \
.getOrCreate()
print("Running Pandas to/from conversion example")
dataframe_with_arrow_example(spark)
print("Running pandas_udf scalar example")
scalar_pandas_udf_example(spark)
print("Running pandas_udf grouped map example")
grouped_map_pandas_udf_example(spark)
spark.stop()
| apache-2.0 |
telecombcn-dl/2017-cfis | sessions/utils.py | 1 | 2978 | import matplotlib.pyplot as plt
import numpy as np
import itertools
import keras.backend as K
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
    # Normalize first so the image, the threshold and the text annotations all
    # use the same matrix.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
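# Usage sketch for plot_confusion_matrix (assumes `y_true`, `y_pred` and
# `class_names` come from your own model/evaluation code):
#
#     from sklearn.metrics import confusion_matrix
#     cm = confusion_matrix(y_true, y_pred)
#     plt.figure()
#     plot_confusion_matrix(cm, classes=class_names, normalize=True)
#     plt.show()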
def plot_samples(X_train,N=5):
'''
Plots N**2 randomly selected images from training data in a NxN grid
'''
import random
ps = random.sample(range(0,X_train.shape[0]), N**2)
f, axarr = plt.subplots(N, N)
p = 0
for i in range(N):
for j in range(N):
if len(X_train.shape) == 3:
axarr[i,j].imshow(X_train[ps[p]],cmap='gray')
else:
im = X_train[ps[p]]
axarr[i,j].imshow(im)
axarr[i,j].axis('off')
p+=1
def plot_curves(history,nb_epoch):
"""
Plots accuracy and loss curves given model history and number of epochs
"""
fig, ax1 = plt.subplots()
t = np.arange(0, nb_epoch, 1)
ax1.plot(t,history.history['acc'],'b-')
ax1.plot(t,history.history['val_acc'],'b*')
ax1.set_xlabel('epoch')
ax1.set_ylabel('acc', color='b')
ax1.tick_params('y', colors='b')
plt.legend(['train_acc', 'test_acc'], loc='lower left')
ax2 = ax1.twinx()
ax2.plot(t, history.history['loss'], 'r-')
ax2.plot(t, history.history['val_loss'], 'r*')
ax2.set_ylabel('loss', color='r')
ax2.tick_params('y', colors='r')
plt.legend(['train_loss','test_loss'], loc='upper left')
# util function to convert a tensor into a valid image
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
if K.image_dim_ordering() == 'th':
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
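# deprocess_image and normalize are typically used together for gradient-ascent
# filter visualization. A hedged sketch (assumes a Keras `model`, a chosen
# `layer_output` tensor with channels-last ordering and a `filter_index`):
#
#     loss = K.mean(layer_output[:, :, :, filter_index])
#     grads = normalize(K.gradients(loss, model.input)[0])
#     iterate = K.function([model.input], [loss, grads])
#     # ...run a few gradient-ascent steps on input_img_data, then:
#     img = deprocess_image(input_img_data[0])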
| mit |
pyrrho314/recipesystem | trunk/gempy/scripts/zp_histogram.py | 1 | 3432 | #!/usr/bin/env python
# This tool produces some useful plots for seeing what's going on with zeropoint
# estimates from the QAP.
#
# This was developed as a quick analysis tool to assess QAP ZP and CC numbers
# and to understand comparisons with certain people's skygazing results, but I'm sure
# it would be useful for the DA/SOSs.
# It can be run on any file with a sextractor style OBJCAT in it, for example
# the _forStack files output by the QAP. If no REFCAT is present, it bails out, since there are no reference magnitudes.
#
# Paul Hirst 20120321
import math
import sys
from astrodata import AstroData
import numpy as np
import matplotlib.pyplot as plt
from random import random
filename = sys.argv[1]
ad = AstroData(filename)
objcat = ad['OBJCAT']
if (ad['REFCAT'] is None):
print "No Reference Catalog in this file, thus no Zeropoints. Sorry"
sys.exit(0)
mag = objcat.data.field("MAG_AUTO")
magerr = objcat.data.field("MAGERR_AUTO")
refmag = objcat.data.field("REF_MAG")
refmagerr = objcat.data.field("REF_MAG_ERR")
sxflag = objcat.data.field("FLAGS")
dqflag = objcat.data.field("IMAFLAGS_ISO")
# set mag to None where we don't want to use the object
mag = np.where((mag==-999), None, mag)
mag = np.where((mag==99), None, mag)
mag = np.where((refmag==-999), None, mag)
mag = np.where((np.isnan(refmag)), None, mag)
mag = np.where((sxflag==0), mag, None)
mag = np.where((dqflag==0), mag, None)
# Now ditch the values out of the arrays where mag is None
# NB do mag last
magerr = magerr[np.flatnonzero(mag)]
refmag = refmag[np.flatnonzero(mag)]
refmagerr = refmagerr[np.flatnonzero(mag)]
mag = mag[np.flatnonzero(mag)]
if(len(mag) == 0):
print "No good sources to plot"
sys.exit(1)
# Now apply the exposure time and nom_at_ext corrections to mag
et = float(ad.exposure_time())
if(ad.is_type('GMOS_NODANDSHUFFLE')):
print "Imaging Nod-And-Shuffle. Photometry may be dubious"
et /= 2.0
etmag = 2.5*math.log10(et)
nom_at_ext = float(ad.nominal_atmospheric_extinction())
mag += etmag
mag += nom_at_ext
# Can now calculate the zp array
zp = refmag - mag
zperr = np.sqrt(refmagerr*refmagerr + magerr*magerr)
# Trim values out of zp where the zeropoint error is > 0.1
zp_trim = np.where((zperr<0.1), zp, None)
zperr_trim = zperr[np.flatnonzero(zp_trim)]
refmag_trim = refmag[np.flatnonzero(zp_trim)]
refmagerr_trim = refmagerr[np.flatnonzero(zp_trim)]
zp_trim = zp[np.flatnonzero(zp_trim)]
nzp = float(ad.nominal_photometric_zeropoint())
plt.figure(1)
# Plot the mag-mag plot
plt.subplot(2,2,1)
plt.scatter(refmag, mag)
plt.errorbar(refmag, mag, xerr=refmagerr, yerr=magerr, fmt=None)
plt.xlabel('Reference Magnitude')
plt.ylabel('Instrumental Magnitude')
# Plot the mag - zp plot
plt.subplot(2,2,2)
plt.scatter(refmag, zp)
plt.errorbar(refmag, zp, xerr=refmagerr, yerr=zperr, fmt=None)
plt.scatter(refmag_trim, zp_trim, color='g')
plt.errorbar(refmag_trim, zp_trim, xerr=refmagerr_trim, yerr=zperr_trim, c='g', fmt=None)
plt.axhline(y=nzp)
plt.xlabel('Reference Magnitude')
plt.ylabel('Zeropoint')
# Plot the zp histogram
plt.subplot(2,2,3)
plt.hist(zp, bins=40)
plt.hist(zp_trim, bins=40, range=(zp.min(), zp.max()))
plt.axvline(x=nzp)
plt.xlabel('Zeropoint')
plt.ylabel('Number')
# Now plot in CC extinction space
zp -= nzp
zp_trim -= nzp
zp *=-1
zp_trim *= -1
plt.subplot(2,2,4)
plt.hist(zp, bins=40)
plt.hist(zp_trim, bins=40, range=(zp.min(), zp.max()))
plt.xlabel('Cloud Extinction')
plt.ylabel('Number')
plt.show()
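# Hedged addition (not part of the original tool): a quick numeric summary to go
# with the plots. At this point zp and zp_trim hold cloud extinction in magnitudes.
if len(zp_trim) > 0:
    print "Trimmed sources used: %d" % len(zp_trim)
    print "Median cloud extinction: %.3f mag" % np.median(zp_trim)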
| mpl-2.0 |
pprett/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
lmcinnes/hdbscan | hdbscan/plots.py | 1 | 32608 | # -*- coding: utf-8 -*-
# Author: Leland McInnes <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy.cluster.hierarchy import dendrogram
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise_distances
from warnings import warn
from ._hdbscan_tree import compute_stability, labelling_at_cut
CB_LEFT = 0
CB_RIGHT = 1
CB_BOTTOM = 2
CB_TOP = 3
def _bfs_from_cluster_tree(tree, bfs_root):
"""
Perform a breadth first search on a tree in condensed tree format
"""
result = []
to_process = [bfs_root]
while to_process:
result.extend(to_process)
to_process = tree['child'][np.in1d(tree['parent'], to_process)].tolist()
return result
def _recurse_leaf_dfs(cluster_tree, current_node):
children = cluster_tree[cluster_tree['parent'] == current_node]['child']
if len(children) == 0:
return [current_node,]
else:
return sum([_recurse_leaf_dfs(cluster_tree, child) for child in children], [])
def _get_leaves(condensed_tree):
cluster_tree = condensed_tree[condensed_tree['child_size'] > 1]
root = cluster_tree['parent'].min()
return _recurse_leaf_dfs(cluster_tree, root)
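# Note: the condensed tree arrays handled below are numpy record arrays with
# fields ('parent', 'child', 'lambda_val', 'child_size'). A hedged sketch of a
# compatible dtype (the field names are what matter here; the exact integer
# widths are an assumption):
#
#     condensed_dtype = np.dtype([('parent', np.intp), ('child', np.intp),
#                                 ('lambda_val', float), ('child_size', np.intp)])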
class CondensedTree(object):
"""The condensed tree structure, which provides a simplified or smoothed version
of the :class:`~hdbscan.plots.SingleLinkageTree`.
Parameters
----------
condensed_tree_array : numpy recarray from :class:`~hdbscan.HDBSCAN`
The raw numpy rec array version of the condensed tree as produced
internally by hdbscan.
cluster_selection_method : string, optional (default 'eom')
The method of selecting clusters. One of 'eom' or 'leaf'
"""
def __init__(self, condensed_tree_array, cluster_selection_method='eom'):
self._raw_tree = condensed_tree_array
self.cluster_selection_method = cluster_selection_method
def get_plot_data(self,
leaf_separation=1,
log_size=False,
max_rectangle_per_icicle=20):
"""Generates data for use in plotting the 'icicle plot' or dendrogram
plot of the condensed tree generated by HDBSCAN.
Parameters
----------
leaf_separation : float, optional
How far apart to space the final leaves of the
dendrogram. (default 1)
log_size : boolean, optional
Use log scale for the 'size' of clusters (i.e. number of
points in the cluster at a given lambda value).
(default False)
        max_rectangle_per_icicle : int, optional
            To simplify the plot this method will only emit
            ``max_rectangle_per_icicle`` bars per branch of the dendrogram.
This ensures that we don't suffer from massive overplotting in
cases with a lot of data points.
Returns
-------
plot_data : dict
Data associated to bars in a bar plot:
`bar_centers` x coordinate centers for bars
`bar_tops` heights of bars in lambda scale
`bar_bottoms` y coordinate of bottoms of bars
`bar_widths` widths of the bars (in x coord scale)
`bar_bounds` a 4-tuple of [left, right, bottom, top]
giving the bounds on a full set of
cluster bars
Data associates with cluster splits:
`line_xs` x coordinates for horizontal dendrogram lines
`line_ys` y coordinates for horizontal dendrogram lines
"""
leaves = _get_leaves(self._raw_tree)
last_leaf = self._raw_tree['parent'].max()
root = self._raw_tree['parent'].min()
# We want to get the x and y coordinates for the start of each cluster
# Initialize the leaves, since we know where they go, the iterate
# through everything from the leaves back, setting coords as we go
cluster_x_coords = dict(zip(leaves, [leaf_separation * x
for x in range(len(leaves))]))
cluster_y_coords = {root: 0.0}
for cluster in range(last_leaf, root - 1, -1):
split = self._raw_tree[['child', 'lambda_val']]
split = split[(self._raw_tree['parent'] == cluster) &
(self._raw_tree['child_size'] > 1)]
if len(split['child']) > 1:
left_child, right_child = split['child']
cluster_x_coords[cluster] = np.mean([cluster_x_coords[left_child],
cluster_x_coords[right_child]])
cluster_y_coords[left_child] = split['lambda_val'][0]
cluster_y_coords[right_child] = split['lambda_val'][1]
# We use bars to plot the 'icicles', so we need to generate centers, tops,
# bottoms and widths for each rectangle. We can go through each cluster
# and do this for each in turn.
bar_centers = []
bar_tops = []
bar_bottoms = []
bar_widths = []
cluster_bounds = {}
scaling = np.sum(self._raw_tree[self._raw_tree['parent'] == root]['child_size'])
if log_size:
scaling = np.log(scaling)
for c in range(last_leaf, root - 1, -1):
cluster_bounds[c] = [0, 0, 0, 0]
c_children = self._raw_tree[self._raw_tree['parent'] == c]
current_size = np.sum(c_children['child_size'])
current_lambda = cluster_y_coords[c]
cluster_max_size = current_size
cluster_max_lambda = c_children['lambda_val'].max()
cluster_min_size = np.sum(
c_children[c_children['lambda_val'] ==
cluster_max_lambda]['child_size'])
if log_size:
current_size = np.log(current_size)
cluster_max_size = np.log(cluster_max_size)
cluster_min_size = np.log(cluster_min_size)
total_size_change = float(cluster_max_size - cluster_min_size)
step_size_change = total_size_change / max_rectangle_per_icicle
cluster_bounds[c][CB_LEFT] = cluster_x_coords[c] * scaling - (current_size / 2.0)
cluster_bounds[c][CB_RIGHT] = cluster_x_coords[c] * scaling + (current_size / 2.0)
cluster_bounds[c][CB_BOTTOM] = cluster_y_coords[c]
cluster_bounds[c][CB_TOP] = np.max(c_children['lambda_val'])
last_step_size = current_size
last_step_lambda = current_lambda
for i in np.argsort(c_children['lambda_val']):
row = c_children[i]
if row['lambda_val'] != current_lambda and \
(last_step_size - current_size > step_size_change
or row['lambda_val'] == cluster_max_lambda):
bar_centers.append(cluster_x_coords[c] * scaling)
bar_tops.append(row['lambda_val'] - last_step_lambda)
bar_bottoms.append(last_step_lambda)
bar_widths.append(last_step_size)
last_step_size = current_size
last_step_lambda = current_lambda
if log_size:
exp_size = np.exp(current_size) - row['child_size']
# Ensure we don't try to take log of zero
if exp_size > 0.01:
current_size = np.log(np.exp(current_size) - row['child_size'])
else:
current_size = 0.0
else:
current_size -= row['child_size']
current_lambda = row['lambda_val']
# Finally we need the horizontal lines that occur at cluster splits.
line_xs = []
line_ys = []
for row in self._raw_tree[self._raw_tree['child_size'] > 1]:
parent = row['parent']
child = row['child']
child_size = row['child_size']
if log_size:
child_size = np.log(child_size)
sign = np.sign(cluster_x_coords[child] - cluster_x_coords[parent])
line_xs.append([
cluster_x_coords[parent] * scaling,
cluster_x_coords[child] * scaling + sign * (child_size / 2.0)
])
line_ys.append([
cluster_y_coords[child],
cluster_y_coords[child]
])
return {
'bar_centers': bar_centers,
'bar_tops': bar_tops,
'bar_bottoms': bar_bottoms,
'bar_widths': bar_widths,
'line_xs': line_xs,
'line_ys': line_ys,
'cluster_bounds': cluster_bounds
}
def _select_clusters(self):
if self.cluster_selection_method == 'eom':
stability = compute_stability(self._raw_tree)
node_list = sorted(stability.keys(), reverse=True)[:-1]
cluster_tree = self._raw_tree[self._raw_tree['child_size'] > 1]
is_cluster = {cluster: True for cluster in node_list}
for node in node_list:
child_selection = (cluster_tree['parent'] == node)
subtree_stability = np.sum([stability[child] for
child in cluster_tree['child'][child_selection]])
if subtree_stability > stability[node]:
is_cluster[node] = False
stability[node] = subtree_stability
else:
for sub_node in _bfs_from_cluster_tree(cluster_tree, node):
if sub_node != node:
is_cluster[sub_node] = False
return [cluster for cluster in is_cluster if is_cluster[cluster]]
elif self.cluster_selection_method == 'leaf':
return _get_leaves(self._raw_tree)
else:
            raise ValueError('Invalid Cluster Selection Method: %s\n'
                             'Should be one of: "eom", "leaf"\n'
                             % self.cluster_selection_method)
def plot(self, leaf_separation=1, cmap='viridis', select_clusters=False,
label_clusters=False, selection_palette=None,
axis=None, colorbar=True, log_size=False,
max_rectangles_per_icicle=20):
"""Use matplotlib to plot an 'icicle plot' dendrogram of the condensed tree.
Effectively this is a dendrogram where the width of each cluster bar is
equal to the number of points (or log of the number of points) in the cluster
at the given lambda value. Thus bars narrow as points progressively drop
        out of clusters. To make the effect more apparent the bars are also colored
        according to the number of points (or log of the number of points).
Parameters
----------
leaf_separation : float, optional (default 1)
How far apart to space the final leaves of the
dendrogram.
cmap : string or matplotlib colormap, optional (default viridis)
The matplotlib colormap to use to color the cluster bars.
select_clusters : boolean, optional (default False)
Whether to draw ovals highlighting which cluster
bar represent the clusters that were selected by
HDBSCAN as the final clusters.
label_clusters : boolean, optional (default False)
If select_clusters is True then this determines
whether to draw text labels on the clusters.
selection_palette : list of colors, optional (default None)
If not None, and at least as long as
the number of clusters, draw ovals
in colors iterating through this palette.
This can aid in cluster identification
when plotting.
axis : matplotlib axis or None, optional (default None)
The matplotlib axis to render to. If None then a new axis
will be generated. The rendered axis will be returned.
colorbar : boolean, optional (default True)
Whether to draw a matplotlib colorbar displaying the range
of cluster sizes as per the colormap.
log_size : boolean, optional (default False)
Use log scale for the 'size' of clusters (i.e. number of
points in the cluster at a given lambda value).
max_rectangles_per_icicle : int, optional (default 20)
To simplify the plot this method will only emit
``max_rectangles_per_icicle`` bars per branch of the dendrogram.
This ensures that we don't suffer from massive overplotting in
cases with a lot of data points.
Returns
-------
axis : matplotlib axis
The axis on which the 'icicle plot' has been rendered.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError(
'You must install the matplotlib library to plot the condensed tree.'
'Use get_plot_data to calculate the relevant data without plotting.')
plot_data = self.get_plot_data(leaf_separation=leaf_separation,
log_size=log_size,
max_rectangle_per_icicle=max_rectangles_per_icicle)
if cmap != 'none':
sm = plt.cm.ScalarMappable(cmap=cmap,
norm=plt.Normalize(0, max(plot_data['bar_widths'])))
sm.set_array(plot_data['bar_widths'])
bar_colors = [sm.to_rgba(x) for x in plot_data['bar_widths']]
else:
bar_colors = 'black'
if axis is None:
axis = plt.gca()
axis.bar(
plot_data['bar_centers'],
plot_data['bar_tops'],
bottom=plot_data['bar_bottoms'],
width=plot_data['bar_widths'],
color=bar_colors,
align='center',
linewidth=0
)
for xs, ys in zip(plot_data['line_xs'], plot_data['line_ys']):
axis.plot(xs, ys, color='black', linewidth=1)
if select_clusters:
try:
from matplotlib.patches import Ellipse
except ImportError:
raise ImportError('You must have matplotlib.patches available to plot selected clusters.')
chosen_clusters = self._select_clusters()
for i, c in enumerate(chosen_clusters):
c_bounds = plot_data['cluster_bounds'][c]
width = (c_bounds[CB_RIGHT] - c_bounds[CB_LEFT])
height = (c_bounds[CB_TOP] - c_bounds[CB_BOTTOM])
center = (
np.mean([c_bounds[CB_LEFT], c_bounds[CB_RIGHT]]),
np.mean([c_bounds[CB_TOP], c_bounds[CB_BOTTOM]]),
)
if selection_palette is not None and \
len(selection_palette) >= len(chosen_clusters):
oval_color = selection_palette[i]
else:
oval_color = 'r'
box = Ellipse(
center,
2.0 * width,
1.2 * height,
facecolor='none',
edgecolor=oval_color,
linewidth=2
)
if label_clusters:
axis.annotate(str(i), xy=center,
xytext=(center[0] - 4.0 * width, center[1] + 0.65 * height),
horizontalalignment='left',
verticalalignment='bottom')
axis.add_artist(box)
if colorbar:
cb = plt.colorbar(sm)
if log_size:
cb.ax.set_ylabel('log(Number of points)')
else:
cb.ax.set_ylabel('Number of points')
axis.set_xticks([])
for side in ('right', 'top', 'bottom'):
axis.spines[side].set_visible(False)
axis.invert_yaxis()
        axis.set_ylabel(r'$\lambda$ value')
return axis
def to_numpy(self):
"""Return a numpy structured array representation of the condensed tree.
"""
return self._raw_tree.copy()
def to_pandas(self):
"""Return a pandas dataframe representation of the condensed tree.
Each row of the dataframe corresponds to an edge in the tree.
The columns of the dataframe are `parent`, `child`, `lambda_val`
and `child_size`.
The `parent` and `child` are the ids of the
parent and child nodes in the tree. Node ids less than the number
of points in the original dataset represent individual points, while
ids greater than the number of points are clusters.
The `lambda_val` value is the value (1/distance) at which the `child`
node leaves the cluster.
The `child_size` is the number of points in the `child` node.
"""
try:
from pandas import DataFrame, Series
except ImportError:
raise ImportError('You must have pandas installed to export pandas DataFrames')
result = DataFrame(self._raw_tree)
return result
def to_networkx(self):
"""Return a NetworkX DiGraph object representing the condensed tree.
Edge weights in the graph are the lamba values at which child nodes
'leave' the parent cluster.
Nodes have a `size` attribute attached giving the number of points
that are in the cluster (or 1 if it is a singleton point) at the
point of cluster creation (fewer points may be in the cluster at
larger lambda values).
"""
try:
from networkx import DiGraph, set_node_attributes
except ImportError:
raise ImportError('You must have networkx installed to export networkx graphs')
result = DiGraph()
for row in self._raw_tree:
result.add_edge(row['parent'], row['child'], weight=row['lambda_val'])
set_node_attributes(result, 'size', dict(self._raw_tree[['child', 'child_size']]))
return result
def _line_width(y, linkage):
if y == 0.0:
return 1.0
else:
return linkage[linkage.T[2] == y][0, 3]
class SingleLinkageTree(object):
"""A single linkage format dendrogram tree, with plotting functionality
and networkX support.
Parameters
----------
linkage : ndarray (n_samples, 4)
The numpy array that holds the tree structure. As output by
        scipy.cluster.hierarchy, hdbscan, or fastcluster.
"""
def __init__(self, linkage):
self._linkage = linkage
def plot(self, axis=None, truncate_mode=None, p=0, vary_line_width=True,
cmap='viridis', colorbar=True):
"""Plot a dendrogram of the single linkage tree.
Parameters
----------
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived
is large. Truncation is used to condense the dendrogram.
There are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last p non-singleton formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
Z[n-p-2:end] in Z. All other non-singleton clusters are
contracted into leaf nodes.
``'level'/'mtica'``
No more than p levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
vary_line_width : boolean, optional
Draw downward branches of the dendrogram with line thickness that
varies depending on the size of the cluster.
cmap : string or matplotlib colormap, optional
The matplotlib colormap to use to color the cluster bars.
A value of 'none' will result in black bars.
(default 'viridis')
colorbar : boolean, optional
Whether to draw a matplotlib colorbar displaying the range
of cluster sizes as per the colormap. (default True)
Returns
-------
axis : matplotlib axis
The axis on which the dendrogram plot has been rendered.
"""
dendrogram_data = dendrogram(self._linkage, p=p, truncate_mode=truncate_mode, no_plot=True)
X = dendrogram_data['icoord']
Y = dendrogram_data['dcoord']
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError('You must install the matplotlib library to plot the single linkage tree.')
if axis is None:
axis = plt.gca()
if vary_line_width:
linewidths = [(_line_width(y[0], self._linkage),
_line_width(y[1], self._linkage))
for y in Y]
else:
linewidths = [(1.0, 1.0)] * len(Y)
if cmap != 'none':
color_array = np.log2(np.array(linewidths).flatten())
sm = plt.cm.ScalarMappable(cmap=cmap,
norm=plt.Normalize(0, color_array.max()))
sm.set_array(color_array)
for x, y, lw in zip(X, Y, linewidths):
left_x = x[:2]
right_x = x[2:]
left_y = y[:2]
right_y = y[2:]
horizontal_x = x[1:3]
horizontal_y = y[1:3]
if cmap != 'none':
axis.plot(left_x, left_y, color=sm.to_rgba(np.log2(lw[0])),
linewidth=np.log2(1 + lw[0]),
solid_joinstyle='miter', solid_capstyle='butt')
axis.plot(right_x, right_y, color=sm.to_rgba(np.log2(lw[1])),
linewidth=np.log2(1 + lw[1]),
solid_joinstyle='miter', solid_capstyle='butt')
else:
axis.plot(left_x, left_y, color='k',
linewidth=np.log2(1 + lw[0]),
solid_joinstyle='miter', solid_capstyle='butt')
axis.plot(right_x, right_y, color='k',
linewidth=np.log2(1 + lw[1]),
solid_joinstyle='miter', solid_capstyle='butt')
axis.plot(horizontal_x, horizontal_y, color='k', linewidth=1.0,
solid_joinstyle='miter', solid_capstyle='butt')
if colorbar:
cb = plt.colorbar(sm)
cb.ax.set_ylabel('log(Number of points)')
axis.set_xticks([])
for side in ('right', 'top', 'bottom'):
axis.spines[side].set_visible(False)
axis.set_ylabel('distance')
return axis
def to_numpy(self):
"""Return a numpy array representation of the single linkage tree.
This representation conforms to the scipy.cluster.hierarchy notion
of a single linkage tree, and can be used with all the associated
scipy tools. Please see the scipy documentation for more details
on the format.
"""
return self._linkage.copy()
def to_pandas(self):
"""Return a pandas dataframe representation of the single linkage tree.
Each row of the dataframe corresponds to an edge in the tree.
The columns of the dataframe are `parent`, `left_child`,
`right_child`, `distance` and `size`.
The `parent`, `left_child` and `right_child` are the ids of the
parent and child nodes in the tree. Node ids less than the number
of points in the original dataset represent individual points, while
ids greater than the number of points are clusters.
        The `distance` value is the distance at which the child nodes merge to form
the parent node.
The `size` is the number of points in the `parent` node.
"""
try:
from pandas import DataFrame, Series
except ImportError:
raise ImportError('You must have pandas installed to export pandas DataFrames')
max_node = 2 * self._linkage.shape[0]
num_points = max_node - (self._linkage.shape[0] - 1)
parent_array = np.arange(num_points, max_node + 1)
result = DataFrame({
'parent': parent_array,
'left_child': self._linkage.T[0],
'right_child': self._linkage.T[1],
'distance': self._linkage.T[2],
'size': self._linkage.T[3]
})[['parent', 'left_child', 'right_child', 'distance', 'size']]
return result
def to_networkx(self):
"""Return a NetworkX DiGraph object representing the single linkage tree.
Edge weights in the graph are the distance values at which child nodes
merge to form the parent cluster.
Nodes have a `size` attribute attached giving the number of points
that are in the cluster.
"""
try:
from networkx import DiGraph, set_node_attributes
except ImportError:
raise ImportError('You must have networkx installed to export networkx graphs')
max_node = 2 * self._linkage.shape[0]
num_points = max_node - (self._linkage.shape[0] - 1)
result = DiGraph()
for parent, row in enumerate(self._linkage, num_points):
result.add_edge(parent, row[0], weight=row[2])
result.add_edge(parent, row[1], weight=row[2])
size_dict = {parent: row[3] for parent, row in enumerate(self._linkage, num_points)}
set_node_attributes(result, 'size', size_dict)
return result
def get_clusters(self, cut_distance, min_cluster_size=5):
"""Return a flat clustering from the single linkage hierarchy.
This represents the result of selecting a cut value for robust single linkage
clustering. The `min_cluster_size` allows the flat clustering to declare noise
        points (and clusters smaller than `min_cluster_size`).
Parameters
----------
cut_distance : float
The mutual reachability distance cut value to use to generate a flat clustering.
min_cluster_size : int, optional
            Clusters smaller than this value will be called 'noise' and remain unclustered
in the resulting flat clustering.
Returns
-------
labels : array [n_samples]
An array of cluster labels, one per datapoint. Unclustered points are assigned
the label -1.
"""
return labelling_at_cut(self._linkage, cut_distance, min_cluster_size)
class MinimumSpanningTree(object):
def __init__(self, mst, data):
self._mst = mst
self._data = data
def plot(self, axis=None, node_size=40, node_color='k',
node_alpha=0.8, edge_alpha=0.5, edge_cmap='viridis_r',
edge_linewidth=2, vary_line_width=True, colorbar=True):
"""Plot the minimum spanning tree (as projected into 2D by t-SNE if required).
Parameters
----------
axis : matplotlib axis, optional
The axis to render the plot to
node_size : int, optional
The size of nodes in the plot (default 40).
node_color : matplotlib color spec, optional
The color to render nodes (default black).
node_alpha : float, optional
The alpha value (between 0 and 1) to render nodes with
(default 0.8).
edge_cmap : matplotlib colormap, optional
The colormap to color edges by (varying color by edge
weight/distance). Can be a cmap object or a string
recognised by matplotlib. (default `viridis_r`)
edge_alpha : float, optional
The alpha value (between 0 and 1) to render edges with
(default 0.5).
edge_linewidth : float, optional
The linewidth to use for rendering edges (default 2).
vary_line_width : bool, optional
Edge width is proportional to (log of) the inverse of the
mutual reachability distance. (default True)
colorbar : bool, optional
Whether to draw a colorbar. (default True)
Returns
-------
axis : matplotlib axis
The axis used the render the plot.
"""
try:
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
except ImportError:
raise ImportError('You must install the matplotlib library to plot the minimum spanning tree.')
if self._data.shape[0] > 32767:
            warn('Too many data points for safe rendering of a minimal spanning tree!')
return None
if axis is None:
axis = plt.gca()
if self._data.shape[1] > 2:
# Get a 2D projection; if we have a lot of dimensions use PCA first
if self._data.shape[1] > 32:
                # Use PCA to get down to 32 dimensions first
data_for_projection = PCA(n_components=32).fit_transform(self._data)
else:
data_for_projection = self._data.copy()
projection = TSNE().fit_transform(data_for_projection)
else:
projection = self._data.copy()
if vary_line_width:
line_width = edge_linewidth * (np.log(self._mst.T[2].max() / self._mst.T[2]) + 1.0)
else:
line_width = edge_linewidth
line_coords = projection[self._mst[:, :2].astype(int)]
line_collection = LineCollection(line_coords, linewidth=line_width,
cmap=edge_cmap, alpha=edge_alpha)
line_collection.set_array(self._mst[:, 2].T)
axis.add_artist(line_collection)
axis.scatter(projection.T[0], projection.T[1], c=node_color, alpha=node_alpha, s=node_size)
axis.set_xticks([])
axis.set_yticks([])
if colorbar:
cb = plt.colorbar(line_collection)
cb.ax.set_ylabel('Mutual reachability distance')
return axis
def to_numpy(self):
"""Return a numpy array of weighted edges in the minimum spanning tree
"""
return self._mst.copy()
def to_pandas(self):
"""Return a Pandas dataframe of the minimum spanning tree.
Each row is an edge in the tree; the columns are `from`,
`to`, and `distance` giving the two vertices of the edge
which are indices into the dataset, and the distance
between those datapoints.
"""
try:
from pandas import DataFrame
except ImportError:
raise ImportError('You must have pandas installed to export pandas DataFrames')
result = DataFrame({'from': self._mst.T[0].astype(int),
'to': self._mst.T[1].astype(int),
'distance': self._mst.T[2]})
return result
def to_networkx(self):
"""Return a NetworkX Graph object representing the minimum spanning tree.
Edge weights in the graph are the distance between the nodes they connect.
Nodes have a `data` attribute attached giving the data vector of the
associated point.
"""
try:
from networkx import Graph, set_node_attributes
except ImportError:
raise ImportError('You must have networkx installed to export networkx graphs')
result = Graph()
for row in self._mst:
result.add_edge(row[0], row[1], weight=row[2])
data_dict = {index: tuple(row) for index, row in enumerate(self._data)}
set_node_attributes(result, 'data', data_dict)
return result
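# Usage sketch (assumes the hdbscan package is installed and `data` is an
# (n_samples, n_features) array). These classes are normally reached through a
# fitted estimator's attributes rather than constructed directly:
#
#     import hdbscan
#     clusterer = hdbscan.HDBSCAN(min_cluster_size=15, gen_min_span_tree=True)
#     clusterer.fit(data)
#     clusterer.condensed_tree_.plot(select_clusters=True)
#     clusterer.single_linkage_tree_.plot()
#     clusterer.minimum_spanning_tree_.plot()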
| bsd-3-clause |
idekerlab/graph-services | services/ig_community/service/test/myservice.py | 1 | 1621 | import cxmate
import logging
import seaborn as sns
from Adapter import IgraphAdapter
from handlers import CommunityDetectionHandlers
logging.basicConfig(level=logging.DEBUG)
# Label for CXmate output
OUTPUT_LABEL = 'out_net'
# Community detection algorithm name
ALGORITHM_TYPE = 'type'
# Palette name
PALETTE_NAME = 'palette'
class IgCommunityDetectionService(cxmate.Service):
"""
CI service for detecting communities in the given network data
"""
def __init__(self):
self.__handlers = CommunityDetectionHandlers()
def process(self, params, input_stream):
logging.debug(params)
algorithm_type = params[ALGORITHM_TYPE]
del params[ALGORITHM_TYPE]
palette = params[PALETTE_NAME]
del params[PALETTE_NAME]
# Set color palette
sns.set_palette(palette=palette)
        # Replace the string "None" with the Python None value
for k, v in params.items():
if v == str(None):
params[k] = None
# Convert to igraph objects
ig_networks = IgraphAdapter.to_igraph(input_stream)
for net in ig_networks:
net['label'] = OUTPUT_LABEL
# Get the community detection function by name of the algorithm
handler = self.__handlers.get_handler(algorithm_type)
# Call the function to detect community
handler(net, **params)
return IgraphAdapter.from_igraph(ig_networks)
if __name__ == "__main__":
analyzer = IgCommunityDetectionService()
logging.info('Starting igraph community detection service...')
analyzer.run()
| mit |