repo_name | path | copies | size | content | license
---|---|---|---|---|---|
koverholt/bayes-fire | Example_Cases/Evac_Stairs/Scripts/run_evac_flow_effective_width.py | 1 | 5235 | #!/usr/bin/env python
"""
PyMC Bayesian Inference on Evacuation Data
Stage 0: Plot evac data.
Stage 1: Fit evac data with linear model.
Stage 2: Fit evac data with power law model.
"""
import matplotlib
matplotlib.use("Agg")
import pylab as pl
import evac_flow_effective_width_graphics as graphics
import pymc as mc
import evac_flow_effective_width_models as models
import data_evac
# ============
# = Settings =
# ============
mcmc_iterations = 1000000
burn_iterations = 800000
thinning_parameter = 200
case_name = 'effective_width'
independent_var = data_evac.effective_width
# ============================
# = Stage 0 (plot exp. data) =
# ============================
# Plot evac data
pl.figure(figsize=(12,9))
graphics.plot_all_data()
pl.savefig('../Figures/Data_Models/flow_' + case_name + '_evac_data.pdf')
# ==========================
# = Stage 1 (linear model) =
# ==========================
# Generate model
vars1 = models.linear()
# Fit model with MAP estimates
map = mc.MAP(vars1)
map.fit(method='fmin_powell', verbose=2)
### Initialize posterior predictive check ###
# Add a data posterior prediction deterministic
@mc.deterministic
def y_pred1(mu=vars1['y_mean'], sigma=vars1['sigma']):
return mc.rnormal(mu, sigma**-2)
vars1['y_pred1'] = y_pred1
### End initialize posterior predictive check ###
# Import model variables and set database options
m1 = mc.MCMC(vars1, db='sqlite', dbname='../Figures/Data_Models/flow_' + case_name + '_evac_linear.sqlite')
# Use adaptive Metropolis-Hastings step method
m1.use_step_method(mc.AdaptiveMetropolis, [m1.theta])
# Configure and run MCMC simulation
m1.sample(iter=mcmc_iterations, burn=burn_iterations, thin=thinning_parameter)
# Plot traces and model with mean values
pl.figure(figsize=(12,9))
graphics.plot_evac_data()
graphics.plot_linear_model(m1)
pl.savefig('../Figures/Data_Models/flow_' + case_name + '_evac_linear.pdf')
# Plot resulting distributions and convergence diagnostics
mc.Matplot.plot(m1, format='pdf', path='../Figures/Data_Models/flow_' + case_name + '_evac_linear',
common_scale=False)
### Posterior predictive check ###
# Plot residuals
pl.figure(figsize=(12,9))
y_err1 = m1.y_obs.value - m1.y_mean.stats()['mean']
pl.hlines([0], 0.6, 1.3, linewidth=3, linestyle='dashed')
pl.plot(independent_var, y_err1, 'gs', mew=0, ms=10)
graphics.decorate_plot()
pl.ylabel("Residual (observed - expected)", fontsize=24)
pl.axis([0.6, 1.3, -3, 3])
pl.savefig('../Figures/Data_Models/PPC/flow_' + case_name + '_evac_linear_residuals.pdf')
# Generate a posterior predictive check
pl.figure(figsize=(12,9))
graphics.plot_predicted_data(y_pred1)
pl.savefig('../Figures/Data_Models/PPC/flow_' + case_name + '_evac_linear_ppc.pdf')
### End posterior predictive check ###
# =============================
# = Stage 2 (power law model) =
# =============================
# Generate model
vars2 = models.power_law()
# Fit model with MAP estimates
map = mc.MAP(vars2)
map.fit(method='fmin_powell', verbose=2)
### Initialize posterior predictive check ###
# Add a data posterior prediction deterministic
@mc.deterministic
def y_pred2(mu=vars2['y_mean'], sigma=vars2['sigma']):
return mc.rnormal(mu, sigma**-2)
vars2['y_pred2'] = y_pred2
### End initialize posterior predictive check ###
# Import model variables and set database options
m2 = mc.MCMC(vars2, db='sqlite', dbname='../Figures/Data_Models/flow_' + case_name + '_evac_power_law.sqlite')
# Use adaptive Metropolis-Hastings step method
m2.use_step_method(mc.AdaptiveMetropolis, [m2.theta])
# Configure and run MCMC simulation
m2.sample(iter=mcmc_iterations, burn=burn_iterations, thin=thinning_parameter)
# Plot traces and model with mean values
pl.figure(figsize=(12,9))
graphics.plot_evac_data()
graphics.plot_power_law_model(m2)
pl.savefig('../Figures/Data_Models/flow_' + case_name + '_evac_power_law.pdf')
# Plot resulting distributions and convergence diagnostics
mc.Matplot.plot(m2, format='pdf', path='../Figures/Data_Models/flow_' + case_name + '_evac_power_law',
common_scale=False)
### Posterior predictive check ###
# Plot residuals
pl.figure(figsize=(12,9))
y_err2 = m2.y_obs.value - m2.y_mean.stats()['mean']
pl.hlines([0], 0.6, 1.3, linewidth=3, linestyle='dashed')
pl.plot(independent_var, y_err2, 'gs', mew=0, ms=10)
graphics.decorate_plot()
pl.ylabel("Residual (observed - expected)", fontsize=24)
pl.axis([0.6, 1.3, -3, 3])
pl.savefig('../Figures/Data_Models/PPC/flow_' + case_name + '_evac_power_law_residuals.pdf')
# Generate a posterior predictive check
pl.figure(figsize=(12,9))
graphics.plot_predicted_data(y_pred2)
pl.savefig('../Figures/Data_Models/PPC/flow_' + case_name + '_evac_power_law_ppc.pdf')
### End posterior predictive check ###
# =================
# = Print results =
# =================
# Display results
print "Results for Linear Model"
m1.theta.summary()
print "Results for Power Law Model"
m2.theta.summary()
# Write results to file
m1.write_csv('../Figures/Data_Models/flow_' + case_name + '_evac_linear.csv')
m2.write_csv('../Figures/Data_Models/flow_' + case_name + '_evac_power_law.csv')
# Find DIC
print 'DIC (Linear Model) = %f' % m1.dic
print 'DIC (Power Law Model) = %f' % m2.dic
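# Illustrative sketch (not part of the original script): the MAP -> MCMC -> DIC
# workflow used above for both stages, wrapped in a small helper. Assumes PyMC 2
# and a model-variable dict such as the ones returned by models.linear() or
# models.power_law(); the helper itself is hypothetical and never called here.
def fit_and_score(model_vars, dbname, iterations=1000, burn=500, thin=2):
    """Fit a PyMC 2 model (MAP initialization, then MCMC) and return (MCMC, DIC)."""
    mc.MAP(model_vars).fit(method='fmin_powell')
    m = mc.MCMC(model_vars, db='sqlite', dbname=dbname)
    m.sample(iter=iterations, burn=burn, thin=thin)
    return m, m.dic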
| bsd-3-clause |
khkaminska/scikit-learn | sklearn/linear_model/randomized_l1.py | 68 | 23405 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
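# Minimal sketch (illustrative, not used by the module) of a single randomized
# design as built in the loop above: columns are scaled down by a random factor
# and rows are subsampled. Note that `scaling` here is already 1 - user_scaling,
# exactly as inside _resample_model.
def _one_randomized_design(X, random_state, scaling=.5, sample_fraction=.75):
    n_samples, n_features = X.shape
    weights = scaling * random_state.random_integers(0, 1, size=(n_features,))
    mask = random_state.rand(n_samples) < sample_fraction
    return (1 - weights) * X[mask], mask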
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
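# Illustrative usage sketch (not part of the module): the fit -> get_support ->
# transform flow provided by this base class, shown with RandomizedLasso
# (defined below). The data is random and only meant to show the API shape.
def _example_randomized_lasso_flow():
    import numpy as np
    from sklearn.linear_model import RandomizedLasso
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] + 0.1 * rng.randn(50)
    rl = RandomizedLasso(alpha=0.025, n_resampling=50, random_state=0)
    rl.fit(X, y)
    support = rl.get_support()        # boolean mask of selected features
    X_selected = rl.transform(X)      # keep only the selected columns
    return rl.scores_, support, X_selected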
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article, which corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
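# Illustrative usage sketch (not part of the module): stability selection for a
# classification problem with RandomizedLogisticRegression, on random data.
def _example_randomized_logistic_flow():
    import numpy as np
    from sklearn.linear_model import RandomizedLogisticRegression
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    rlog = RandomizedLogisticRegression(C=1.0, n_resampling=50, random_state=0)
    rlog.fit(X, y)
    return rlog.get_support(indices=True)  # indices of the selected features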
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
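# Illustrative sketch (not part of the module): computing a stability path.
# `alphas_grid` contains alpha/alpha_max values in [0, 1] and `scores_path`
# holds one selection-frequency curve per feature.
def _example_stability_path():
    import numpy as np
    from sklearn.linear_model import lasso_stability_path
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 2] + 0.1 * rng.randn(50)
    alphas_grid, scores_path = lasso_stability_path(X, y, random_state=0,
                                                    n_resampling=50)
    return alphas_grid, scores_path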
| bsd-3-clause |
andyh616/mne-python | mne/time_frequency/tfr.py | 1 | 51516 | """A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <[email protected]>
# Hari Bharadwaj <[email protected]>
#
# License : BSD (3-clause)
import warnings
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..io.meas_info import Info
from ..utils import check_fname
from .multitaper import dpss_windows
from ..viz.utils import figure_nobar
from ..externals.h5io import write_hdf5, read_hdf5
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, _BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis, ...].copy()
return data
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency
freqs : array
frequency range of interest (1 x Frequencies)
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
sigma : float, optional
Controls the width of the wavelet, i.e., its temporal
resolution. If sigma is None, the temporal resolution
adapts to the frequency, as for any wavelet transform:
the higher the frequency, the shorter the wavelet.
If sigma is fixed, the temporal resolution is fixed,
as for the short-time Fourier transform, and the number
of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
W = oscillation * gaussian_envelope
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
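# Illustrative sketch (not part of the module): building Morlet wavelets for a
# few frequencies with the function above; with sigma=None, lower frequencies
# yield longer wavelets. Uses the module-level numpy import.
def _example_morlet_wavelets():
    sfreq = 1000.
    freqs = np.array([6., 10., 20.])
    Ws = morlet(sfreq, freqs, n_cycles=7, zero_mean=True)
    return [len(W) for W in Ws]  # lengths decrease with increasing frequency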
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
time_bandwidth : float, (optional)
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
"""Compute time freq decomposition with Morlet wavelets
This function operates directly on numpy arrays. Consider using
`tfr_morlet` to process `Epochs` or `Evoked` instances.
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
sfreq : float
sampling Frequency
freqs : array
Array of frequencies of interest
use_fft : bool
Whether to use FFT-based or temporal convolution.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
"""
mode = 'same'
# mode = "valid"
n_signals, n_times = X.shape
n_frequencies = len(freqs)
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
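# Illustrative sketch (not part of the module): time-frequency decomposition of
# two noisy sinusoids with cwt_morlet; power is the squared magnitude of the
# complex coefficients. Frequencies start at 8 Hz so the longest wavelet still
# fits within the 2-second signals.
def _example_cwt_morlet():
    sfreq = 250.
    t = np.arange(0, 2., 1. / sfreq)
    rng = np.random.RandomState(0)
    X = np.array([np.sin(2 * np.pi * 10 * t), np.sin(2 * np.pi * 20 * t)])
    X = X + 0.1 * rng.randn(*X.shape)
    freqs = np.arange(8., 30., 2.)
    tfr = cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7)
    power = (tfr * tfr.conj()).real  # shape (n_signals, n_freqs, n_times)
    return power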
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Ws : list of array
Wavelets time series
use_fft : bool
Use FFT for convolutions
mode : 'same' | 'valid' | 'full'
Convention for convolution
decim : int
Temporal decimation factor
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
n_signals, n_times = X[:, ::decim].shape
n_frequencies = len(Ws)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr[..., ::decim]
return tfrs
def _time_frequency(X, Ws, use_fft, decim):
"""Aux of time_frequency for parallel computing over channels
"""
n_epochs, n_times = X.shape
n_times = n_times // decim + bool(n_times % decim)
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
mode = 'same'
if use_fft:
tfrs = _cwt_fft(X, Ws, mode)
else:
tfrs = _cwt_convolve(X, Ws, mode)
for tfr in tfrs:
tfr = tfr[:, ::decim]
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
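# Minimal numpy sketch (illustrative, not used by the module) of the quantities
# accumulated above: power is the mean squared magnitude across epochs, and the
# phase-locking factor is the magnitude of the mean unit phasor.
def _example_psd_plf(tfrs_per_epoch):
    """tfrs_per_epoch : sequence of complex arrays, shape (n_freqs, n_times)."""
    tfrs = np.asarray(tfrs_per_epoch)
    psd = np.mean(np.abs(tfrs) ** 2, axis=0)
    plf = np.abs(np.mean(tfrs / np.abs(tfrs), axis=0))
    return psd, plf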
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
sfreq : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int
Temporal decimation factor
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
x = cwt(e, **cwt_kw)
power[k] = (x * x.conj()).real
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = (tfr * tfr.conj()).real
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[::decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
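# Illustrative sketch (not part of the module): single-trial power on random
# epochs-shaped data (n_epochs, n_channels, n_times) with ratio baseline
# correction over the first 100 ms and temporal decimation by 2.
def _example_single_trial_power():
    sfreq = 200.
    n_epochs, n_channels, n_times = 5, 3, 400
    rng = np.random.RandomState(0)
    data = rng.randn(n_epochs, n_channels, n_times)
    times = np.arange(n_times) / sfreq - 0.5
    freqs = np.arange(6., 20., 2.)
    power = single_trial_power(data, sfreq, freqs, n_cycles=4,
                               baseline=(-0.5, -0.4), baseline_mode='ratio',
                               times=times, decim=2)
    return power.shape  # (n_epochs, n_channels, n_freqs, n_times // decim)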
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
sfreq : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim : int
Temporal decimation factor
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
power : 3D array
Induced power (Channels x Frequencies x Timepoints).
Squared amplitude of time-frequency coefficients.
phase_lock : 3D array
Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times))
# Separate to save memory for n_jobs=1
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
for c in range(n_channels))
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c, plf_c
return psd, plf
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
if mode is not None and baseline is not None:
logger.info("Applying baseline correction '%s' during %s" %
(mode, baseline))
data = rescale(data.copy(), times, baseline, mode)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = times
self.freqs = freqs
self.nave = nave
self.comment = comment
self.method = method
@property
def ch_names(self):
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
If False epochs is cropped in place.
"""
inst = self if not copy else self.copy()
mask = _time_mask(inst.times, tmin, tmax)
inst.times = inst.times[mask]
inst.data = inst.data[..., mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, layout=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value on the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maximum value on the color scale. If vmax is None, the data
maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
data = self.data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != len(picks):
raise RuntimeError('There must be an axes for each picked '
'channel.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
ylim=None, tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=colorbar, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
colorbar = False # only one colorbar for multiple axes
if show:
plt.show()
return fig
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Callback function called by rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info('The selected area is too small. '
'Select a larger time-frequency window.')
return
types = list()
if 'eeg' in self:
types.append('eeg')
if 'mag' in self:
types.append('mag')
if 'grad' in self:
types.append('grad')
fig = figure_nobar()
fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(tmin,
tmax,
fmin,
fmax),
y=0.04)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, layout=layout,
baseline=baseline, mode=mode, cmap=None,
title=ch_type, vmin=None, vmax=None,
axes=ax)
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value on the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maximum value on the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo
import matplotlib.pyplot as plt
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
if picks is not None:
data = data[picks]
info = pick_info(info, picks)
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
layout=layout, colorbar=colorbar, vmin=vmin,
vmax=vmax, cmap=cmap, layout_scale=layout_scale,
title=title, border=border, x_label='Time (ms)',
y_label='Frequency (Hz)', fig_facecolor=fig_facecolor,
font_color=font_color)
if show:
plt.show()
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
def apply_baseline(self, baseline, mode='mean'):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, no baseline correction will be performed.
"""
self.data = rescale(self.data, self.times, baseline, mode, copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap=None,
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
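    # Illustrative usage sketch (assumed names, not taken from the source):
    # assuming this method is exposed on AverageTFR as ``plot_topomap``, a
    # baseline-corrected topomap of 8-12 Hz power between 0.1 s and 0.3 s
    # could be requested roughly like this:
    #   power.plot_topomap(tmin=0.1, tmax=0.3, fmin=8., fmax=12.,
    #                      baseline=(None, 0), mode='logratio')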
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file
Parameters
----------
fname : str
            The file name, which should end with -tfr.h5.
        overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.
"""
write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
"""Aux function"""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info, nave=tfr.nave,
comment=tfr.comment, method=tfr.method))
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
def read_tfrs(fname, condition=None):
"""
Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5 .
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname, title='mnepython')
for k, tfr in tfr_data:
tfr['info'] = Info(tfr['info'])
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
            raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}"'
                             .format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
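def _example_write_read_tfrs(power):
    """Illustrative usage sketch (hypothetical helper, not part of the public
    API): a minimal save/load round trip. Assumes ``power`` is an AverageTFR
    instance (e.g. the output of tfr_morlet below); the file name is an
    arbitrary choice, only the '-tfr.h5' suffix is required.
    """
    write_tfrs('example-tfr.h5', power, overwrite=True)
    # condition=None returns a list with one AverageTFR per stored condition
    tfrs = read_tfrs('example-tfr.h5')
    return tfrs[0]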
def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
        Use FFT-based convolution or not.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int
The decimation factor on the time axis. To reduce memory usage.
n_jobs : int
The number of jobs to run in parallel.
Returns
-------
power : instance of AverageTFR
The averaged power.
itc : instance of AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
tfr_multitaper, tfr_stockwell
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='morlet-itc'))
return out
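def _example_tfr_morlet(epochs):
    """Illustrative usage sketch (hypothetical helper, not part of the public
    API). Assumes ``epochs`` is an mne.Epochs instance with MEG/EEG channels;
    the frequency grid and cycle choices below are arbitrary.
    """
    freqs = np.arange(6., 30., 3.)      # frequencies of interest, in Hz
    n_cycles = freqs / 2.               # time window scales with frequency
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True, decim=3, n_jobs=1)
    return power, itc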
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
use_fft=True, n_cycles=7, decim=1, n_jobs=1,
zero_mean=True, verbose=None):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with DPSS wavelets
Parameters
----------
data : np.ndarray, shape (n_epochs, n_channels, n_times)
The input data.
    sfreq : float
        Sampling frequency.
    frequencies : np.ndarray, shape (n_frequencies,)
        Array of frequencies of interest.
time_bandwidth : float
Time x (Full) Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions. Defaults to True.
n_cycles : float | np.ndarray shape (n_frequencies,)
Number of cycles. Fixed number or one per frequency. Defaults to 7.
    decim : int
        Temporal decimation factor. Defaults to 1.
    n_jobs : int
        The number of CPUs used in parallel. If -1, all CPUs are used.
        Requires joblib package. Defaults to 1.
zero_mean : bool
Make sure the wavelets are zero mean. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : np.ndarray, shape (n_channels, n_frequencies, n_times)
Induced power. Squared amplitude of time-frequency coefficients.
itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
Phase locking value.
"""
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
n_frequencies = len(frequencies)
logger.info('Multitaper time-frequency analysis for %d frequencies',
n_frequencies)
# Precompute wavelets for given frequency range to save time
Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
n_taps = len(Ws)
logger.info('Using %d tapers', n_taps)
n_times_wavelets = Ws[0][0].shape[0]
if n_times <= n_times_wavelets:
warnings.warn("Time windows are as long or longer than the epoch. "
"Consider reducing n_cycles.")
psd = np.zeros((n_channels, n_frequencies, n_times))
itc = np.zeros((n_channels, n_frequencies, n_times))
parallel, my_time_frequency, _ = parallel_func(_time_frequency,
n_jobs)
for m in range(n_taps):
psd_itc = parallel(my_time_frequency(data[:, c, :],
Ws[m], use_fft, decim)
for c in range(n_channels))
for c, (psd_c, itc_c) in enumerate(psd_itc):
psd[c, :, :] += psd_c
itc[c, :, :] += itc_c
psd /= n_taps
itc /= n_taps
return psd, itc
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0, use_fft=True,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using DPSS wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
Default is 4.0 (3 good tapers).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool
        Use FFT-based convolution or not.
Defaults to True.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Defaults to True.
decim : int
The decimation factor on the time axis. To reduce memory usage.
Note than this is brute force decimation, no anti-aliasing is done.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel. Defaults to 1.
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
    tfr_morlet, tfr_stockwell
Notes
-----
.. versionadded:: 0.9.0
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim,
n_jobs=n_jobs, zero_mean=True,
verbose='INFO')
times = inst.times[::decim].copy()
nave = len(data)
    out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
if return_itc:
        out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
return out
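def _example_tfr_multitaper(epochs):
    """Illustrative usage sketch (hypothetical helper, not part of the public
    API). Assumes ``epochs`` is an mne.Epochs instance. With freqs near 20 Hz
    and n_cycles = freqs / 2. the analysis window is 0.5 s, so
    time_bandwidth=4.0 gives roughly 8 Hz of frequency smoothing (see the
    docstring above).
    """
    freqs = np.arange(10., 40., 2.)
    n_cycles = freqs / 2.
    power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
                                time_bandwidth=4.0, use_fft=True,
                                return_itc=True, decim=2, n_jobs=1)
    return power, itc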
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
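def _example_ball_tree_usage():
    """Illustrative usage sketch (hypothetical helper, not collected as a
    test): the pattern the tests below exercise -- BallTree.query checked
    against the brute-force helper above. Sizes and k are arbitrary choices.
    """
    rng_local = np.random.RandomState(42)
    X = rng_local.random_sample((40, DIMENSION))
    Y = rng_local.random_sample((10, DIMENSION))
    bt = BallTree(X, leaf_size=2)
    dist_bt, ind_bt = bt.query(Y, k=3)
    dist_bf, ind_bf = brute_force_neighbors(X, Y, k=3, metric='euclidean')
    assert_array_almost_equal(dist_bt, dist_bf)
    return dist_bt, ind_bt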
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
SoluMilken/xgboostwithwarmstart | xgboostwithwarmstart/xgboost_with_warm_start.py | 1 | 15366 | # coding: utf-8
# pylint: disable=too-many-arguments, too-many-locals, invalid-name, fixme, E0012, R0912
"""Scikit-Learn Wrapper interface for XGBoost."""
from __future__ import absolute_import
import numpy as np
from xgboost import XGBRegressor
from xgboost.core import Booster, DMatrix, XGBoostError
from xgboost.training import train
# Do not use class names on scikit-learn directly.
# Re-define the classes on .compat to guarantee the behavior without scikit-learn
from xgboost.compat import (SKLEARN_INSTALLED, XGBModelBase,
XGBClassifierBase, XGBRegressorBase,
XGBLabelEncoder)
from xgboost.sklearn import _objective_decorator, XGBModel
class XGBRegressorWithWarmStart(XGBRegressor):
def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100,
silent=True, objective="reg:linear",
nthread=-1, gamma=0, min_child_weight=1, max_delta_step=0,
subsample=1, colsample_bytree=1, colsample_bylevel=1,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
base_score=0.5, seed=0, missing=None, warm_start=False):
super(XGBRegressorWithWarmStart, self).__init__(
max_depth, learning_rate, n_estimators,
silent, objective,
nthread, gamma, min_child_weight, max_delta_step,
subsample, colsample_bytree, colsample_bylevel,
reg_alpha, reg_lambda, scale_pos_weight,
base_score, seed, missing)
self.warm_start = warm_start
self.n_trained_estimators = 0
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True):
# pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init
"""
Fit the gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
Weight for each instance
eval_set : list, optional
A list of (X, y) tuple pairs to use as a validation set for
early-stopping
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.md. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have three additional fields: bst.best_score, bst.best_iteration
and bst.best_ntree_limit.
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
and/or num_class appears in the parameters)
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
"""
if sample_weight is not None:
trainDmatrix = DMatrix(X, label=y, weight=sample_weight, missing=self.missing)
else:
trainDmatrix = DMatrix(X, label=y, missing=self.missing)
evals_result = {}
if eval_set is not None:
evals = list(DMatrix(x[0], label=x[1], missing=self.missing) for x in eval_set)
evals = list(zip(evals, ["validation_{}".format(i) for i in
range(len(evals))]))
else:
evals = ()
params = self.get_xgb_params()
if callable(self.objective):
obj = _objective_decorator(self.objective)
params["objective"] = "reg:linear"
else:
obj = None
feval = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
params.update({'eval_metric': eval_metric})
if self.warm_start:
n_estimators = self.n_estimators - self.n_trained_estimators
self._Booster = train(params, trainDmatrix,
n_estimators, evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, obj=obj, feval=feval,
verbose_eval=verbose, xgb_model=self._Booster)
else:
self._Booster = train(params, trainDmatrix,
self.n_estimators, evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, obj=obj, feval=feval,
verbose_eval=verbose)
self.n_trained_estimators = self.n_estimators
if evals_result:
for val in evals_result.items():
evals_result_key = list(val[1].keys())[0]
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
self.evals_result_ = evals_result
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
self.best_ntree_limit = self._Booster.best_ntree_limit
return self
@property
def feature_importances_(self):
"""
Returns
-------
feature_importances_ : array of shape = [n_features]
"""
b = self.booster()
fs = b.get_fscore()
all_features = [fs.get(f, 0.) for f in b.feature_names]
all_features = np.array(all_features, dtype=np.float32)
return all_features / all_features.sum()
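def _example_regressor_warm_start(X, y):
    """Illustrative usage sketch (hypothetical helper, not part of the module):
    the intended warm-start pattern. Assumes X and y are array-like training
    data. After raising n_estimators, fit() only trains the missing trees on
    top of the already fitted booster.
    """
    model = XGBRegressorWithWarmStart(n_estimators=50, warm_start=True)
    model.fit(X, y)            # trains 50 trees
    model.n_estimators = 100
    model.fit(X, y)            # trains only the 50 additional trees
    return model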
class XGBClassifierWithWarmStart(XGBModel, XGBClassifierBase):
# pylint: disable=missing-docstring,too-many-arguments,invalid-name
__doc__ = """Implementation of the scikit-learn API for XGBoost classification.
""" + '\n'.join(XGBModel.__doc__.split('\n')[2:])
def __init__(self, max_depth=3, learning_rate=0.1,
n_estimators=100, silent=True,
objective="binary:logistic",
nthread=-1, gamma=0, min_child_weight=1,
max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
base_score=0.5, seed=0, missing=None, warm_start=False):
super(XGBClassifierWithWarmStart, self).__init__(
max_depth, learning_rate, n_estimators, silent, objective,
nthread, gamma, min_child_weight, max_delta_step, subsample,
colsample_bytree, colsample_bylevel, reg_alpha, reg_lambda,
scale_pos_weight, base_score, seed, missing)
self.warm_start = warm_start
self.n_trained_estimators = 0
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True):
# pylint: disable = attribute-defined-outside-init,arguments-differ
"""
Fit gradient boosting classifier
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
Weight for each instance
eval_set : list, optional
A list of (X, y) pairs to use as a validation set for
early-stopping
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.md. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int, optional
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have three additional fields: bst.best_score, bst.best_iteration
and bst.best_ntree_limit.
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
and/or num_class appears in the parameters)
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
"""
evals_result = {}
self.classes_ = np.unique(y)
self.n_classes_ = len(self.classes_)
params = self.get_xgb_params()
if callable(self.objective):
obj = _objective_decorator(self.objective)
# Use default value. Is it really not used ?
params["objective"] = "binary:logistic"
else:
obj = None
if self.n_classes_ > 2:
# Switch to using a multiclass objective in the underlying XGB instance
params["objective"] = "multi:softprob"
params['num_class'] = self.n_classes_
feval = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
params.update({"eval_metric": eval_metric})
self._le = XGBLabelEncoder().fit(y)
training_labels = self._le.transform(y)
if eval_set is not None:
# TODO: use sample_weight if given?
evals = list(
DMatrix(x[0], label=self._le.transform(x[1]), missing=self.missing)
for x in eval_set
)
nevals = len(evals)
eval_names = ["validation_{}".format(i) for i in range(nevals)]
evals = list(zip(evals, eval_names))
else:
evals = ()
self._features_count = X.shape[1]
if sample_weight is not None:
trainDmatrix = DMatrix(X, label=training_labels, weight=sample_weight,
missing=self.missing)
else:
trainDmatrix = DMatrix(X, label=training_labels, missing=self.missing)
if self.warm_start:
n_estimators = self.n_estimators - self.n_trained_estimators
self._Booster = train(params, trainDmatrix,
n_estimators, evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, obj=obj, feval=feval,
verbose_eval=verbose, xgb_model=self._Booster)
else:
self._Booster = train(params, trainDmatrix,
self.n_estimators, evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, obj=obj, feval=feval,
verbose_eval=verbose)
self.n_trained_estimators = self.n_estimators
self.objective = params["objective"]
if evals_result:
for val in evals_result.items():
evals_result_key = list(val[1].keys())[0]
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
self.evals_result_ = evals_result
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
self.best_ntree_limit = self._Booster.best_ntree_limit
return self
def predict(self, data, output_margin=False, ntree_limit=0):
test_dmatrix = DMatrix(data, missing=self.missing)
class_probs = self.booster().predict(test_dmatrix,
output_margin=output_margin,
ntree_limit=ntree_limit)
if len(class_probs.shape) > 1:
column_indexes = np.argmax(class_probs, axis=1)
else:
column_indexes = np.repeat(0, class_probs.shape[0])
column_indexes[class_probs > 0.5] = 1
return self._le.inverse_transform(column_indexes)
def predict_proba(self, data, output_margin=False, ntree_limit=0):
test_dmatrix = DMatrix(data, missing=self.missing)
class_probs = self.booster().predict(test_dmatrix,
output_margin=output_margin,
ntree_limit=ntree_limit)
if self.objective == "multi:softprob":
return class_probs
else:
classone_probs = class_probs
classzero_probs = 1.0 - classone_probs
return np.vstack((classzero_probs, classone_probs)).transpose()
def evals_result(self):
"""Return the evaluation results.
If eval_set is passed to the `fit` function, you can call evals_result() to
get evaluation results for all passed eval_sets. When eval_metric is also
passed to the `fit` function, the evals_result will contain the eval_metrics
passed to the `fit` function
Returns
-------
evals_result : dictionary
Example
-------
param_dist = {'objective':'binary:logistic', 'n_estimators':2}
clf = xgb.XGBClassifier(**param_dist)
clf.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
eval_metric='logloss',
verbose=True)
evals_result = clf.evals_result()
The variable evals_result will contain:
{'validation_0': {'logloss': ['0.604835', '0.531479']},
'validation_1': {'logloss': ['0.41965', '0.17686']}}
"""
if self.evals_result_:
evals_result = self.evals_result_
else:
raise XGBoostError('No results.')
return evals_result
@property
def feature_importances_(self):
"""
Returns
-------
feature_importances_ : array of shape = [n_features]
"""
b = self.booster()
fs = b.get_fscore()
all_features = [fs.get(f, 0.) for f in b.feature_names]
all_features = np.array(all_features, dtype=np.float32)
return all_features / all_features.sum()
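def _example_classifier_warm_start(X, y):
    """Illustrative usage sketch (hypothetical helper, not part of the module):
    the same warm-start pattern for classification. Assumes X is a 2-D feature
    array and y a 1-D label array.
    """
    clf = XGBClassifierWithWarmStart(n_estimators=20, warm_start=True)
    clf.fit(X, y)
    clf.n_estimators = 40      # 20 more boosting rounds on the next fit
    clf.fit(X, y)
    return clf.predict_proba(X)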
| bsd-2-clause |
ArteliaTelemac/PostTelemac | PostTelemac/meshlayerrenderer/post_telemac_opengl_get_qimage_qt5.py | 1 | 78234 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
PostTelemac
A QGIS plugin
 Post Treatment for Telemac
-------------------
begin : 2015-07-07
git sha : $Format:%H$
copyright : (C) 2015 by Artelia
email : [email protected]
***************************************************************************/
get Image class
 Generates a QImage from the selafin file to be displayed in the map canvas
 by the draw method of posttelemacpluginlayer
Versions :
 0.0 : initial version
***************************************************************************/
"""
# import qgis
import qgis.core
# import PyQT
# import matplotlib
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# from matplotlib import tri
# from matplotlib.backends.backend_agg import FigureCanvasAgg
# import numpy
import numpy as np
# other imports
from time import ctime
# import cStringIO
import gc
import time
from OpenGL.GL import *
from OpenGL.GL import shaders
# from PyQt4 import QtGui, QtCore
from qgis.PyQt import QtGui, QtCore
try:
from qgis.PyQt.QtGui import QApplication
except:
from qgis.PyQt.QtWidgets import QApplication
try:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtOpenGL import QGLPixelBuffer, QGLFormat, QGLContext
except:
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtOpenGL import QGLFormat, QGLContext
import numpy
from math import log, ceil, exp
# from utilities import complete_filename, format_
from .post_telemac_pluginlayer_colormanager import *
from .post_telemac_abstract_get_qimage import *
PRECISION = 0.01
def roundUpSize(size):
"""return size roudup to the nearest power of 2"""
if False:
return QSize(pow(2, ceil(log(size.width()) / log(2))), pow(2, ceil(log(size.height()) / log(2))))
else:
return size
class MeshRenderer(AbstractMeshRenderer):
# __imageChangeRequested = QtCore.pyqtSignal(qgis.core.QgsRenderContext)
__imageChangeRequested = QtCore.pyqtSignal()
RENDERER_TYPE = "OpenGL"
def __init__(self, meshlayer, integertemp, vtx=[[0.0, 0.0, 0.0]], idx=[0]):
AbstractMeshRenderer.__init__(self, meshlayer, integertemp, vtx=[[0.0, 0.0, 0.0]], idx=[0])
# self.fig = plt.figure(int)
# self.canvas = FigureCanvasAgg(self.fig)
# self.meshlayer = meshlayer
# self.ax = self.fig.add_subplot(111)
# Reprojected things
# self.meshxreprojected, self.meshyreprojected = None, None
self.goodpointindex = None
self.arraypoints = None
# Opengl
self.__vtxfacetodraw = numpy.require(vtx, numpy.float32, "F")
self.__idxfacetodraw = numpy.require(idx, numpy.int32, "F")
self.__vtxcanvas = numpy.require(vtx, numpy.float32, "F")
self.__idxcanvas = numpy.require(idx, numpy.int32, "F")
self.__vtxfacetotal = numpy.require(vtx, numpy.float32, "F")
self.__idxfacetotal = numpy.require(idx, numpy.int32, "F")
self.__pixBuf = None
# self.__legend = legend
# self.__legend.symbologyChanged.connect(self.__recompileNeeded)
self.__colorPerElement = False
self.__recompileShader = False
self.__vtxfacetotal[:, 2] = 0
self.meshlayer = meshlayer
self.__imageChangedMutex = QtCore.QMutex()
self.__rendererContext = None
self.__size = None
self.__img = None
self.__imageChangeRequested.connect(self.__drawInMainThread)
self.__pixelColor = ""
self.__pixelColorVelocity = ""
self.__graduation = []
self.__graduationvelocity = []
self.timemax = 1000
self.timestart = None
# ************************************************************************************
# *************************************** Display behaviour******************************
# ************************************************************************************
def CrsChanged(self):
# ikle = self.meshlayer.hydrauparser.getIkle()
# mesh = self.meshlayer.hydrauparser.getElemFaces()
# nodecoords = np.array( [[self.meshxreprojected[i], self.meshyreprojected[i], 0.0] for i in range(len(self.meshxreprojected)) ] )
# nodecoords = np.array( [[self.facenodereprojected[0][i], self.facenodereprojected[1][i], 0.0] for i in range(len(self.facenodereprojected[0])) ] )
# reset : facenode elemnode
self.resetFaceNodeCoord()
# self.resetIdx()
self.resetMesh()
def resetFaceNodeCoord(self, vtx=None):
# __vtx
if False:
if vtx != None:
self.__vtxfacetotal = numpy.require(vtx, numpy.float32, "F")
else:
# self.__vtxfacetotal = np.array( [[self.meshxreprojected[i], self.meshyreprojected[i], 0.0] for i in range(len(self.meshxreprojected)) ] )
self.__vtxfacetotal = np.array(
[
[self.facenodereprojected[0][i], self.facenodereprojected[1][i], 0.0]
for i in range(len(self.facenodereprojected[0]))
]
)
if True:
try:
self.__vtxfacetotal = np.array(
[
[self.facenodereprojected[0][i], self.facenodereprojected[1][i], 0.0]
for i in range(len(self.facenodereprojected[0]))
]
)
self.__idxfacetotal = self.meshlayer.hydrauparser.getElemFaces()
self.__idxfaceonlytotal = self.meshlayer.hydrauparser.getFaces()
# wherebegin polygon
self.__idxfacetotalcountidx = [0]
self.__idxfacetotalcountlen = []
for elem in self.__idxfacetotal:
self.__idxfacetotalcountidx.append((self.__idxfacetotalcountidx[-1]) + len(elem))
# self.__idxfacetotalcountlen.append(len(elem))
self.__idxfacetotalcountidx = np.array(self.__idxfacetotalcountidx)
self.__idxfacetotalcountlen = np.array([len(elem) for elem in self.__idxfacetotal])
except Exception as e:
print("resetFaceNodeCoord " + str(e))
self.__vtxfacetodraw = self.__vtxfacetotal
self.__idxfacetodraw = self.__idxfacetotal
self.__idxfaceonlytodraw = self.__idxfaceonlytotal
if False:
self.__idxfacetodraw1Darray = np.concatenate(self.__idxfacetodraw)
self.__idxfaceonlytodraw1Darray = np.concatenate(self.__idxfaceonlytodraw)
if True:
"""
self.__idxfacetodraw1Darray = self.__idxfacetodraw.ravel()
self.__idxfaceonlytodraw1Darray = self.__idxfaceonlytodraw.ravel()
print ( self.__idxfacetodraw1Darray)
"""
self.__idxfacetodraw1Darray = np.array([idx for idxs in self.__idxfacetodraw for idx in idxs])
self.__idxfaceonlytodraw1Darray = np.array([idx for idxs in self.__idxfaceonlytodraw for idx in idxs])
if False:
def resetIdx(self, idx=None):
# __idx
if False:
if idx != None:
if False:
try:
self.__idxfacetotal = numpy.require(idx, numpy.int32, "F")
except Exception as e:
self.__idxfacetotal = idx
else:
self.__idxfacetotal = numpy.require(idx, numpy.int32, "F")
else:
# self.__idxfacetotal = self.meshlayer.hydrauparser.getIkle()
self.__idxfacetotal = self.meshlayer.hydrauparser.getElemFaces()
if True:
self.__idxfacetotal = self.meshlayer.hydrauparser.getElemFaces()
self.__idxfacetotal = self.meshlayer.hydrauparser.getElemFaces()
self.__idxfacetodraw = self.__idxfacetotal
def resetMesh(self):
self.__vtxmesh = np.array(
[
[self.facenodereprojected[0][i], self.facenodereprojected[1][i], 0.0]
for i in range(len(self.facenodereprojected[0]))
]
)
self.__idxmesh = self.meshlayer.hydrauparser.getFaces()
def CrsChanged2(self):
ikle = self.meshlayer.hydrauparser.getIkle()
nodecoords = np.array(
[[self.meshxreprojected[i], self.meshyreprojected[i], 0.0] for i in range(len(self.meshxreprojected))]
)
self.resetFaceNodeCoord(nodecoords)
self.resetIdx(ikle)
def change_cm_contour(self, cm_raw):
"""
change the color map and layer symbology
"""
self.cmap_contour_leveled = self.colormanager.fromColorrampAndLevels(self.lvl_contour, cm_raw)
if qgis.utils.iface is not None:
try:
qgis.utils.iface.legendInterface().refreshLayerSymbology(self.meshlayer)
except Exception as e:
# print('openglgetimage -change_cm_contour ' + str(e))
# self.meshlayer.propertiesdialog.errorMessage( 'openglgetimage -change_cm_contour ' + str(e) )
qgis.utils.iface.layerTreeView().refreshLayerSymbology(self.meshlayer.id())
# transparency - alpha changed
# if self.cmap_contour_leveled != None and len(self.lvl_contour) > 0:
if isinstance(self.cmap_contour_leveled, list) and len(self.lvl_contour) > 0:
colortemp = np.array(self.cmap_contour_leveled)
for i in range(len(colortemp)):
colortemp[i][3] = min(colortemp[i][3], self.alpha_displayed / 100.0)
# opengl
try:
gradudation = []
tempun = []
if len(self.lvl_contour) >= 3:
for i, color in enumerate(colortemp):
gradudation.append(
(
QtGui.QColor.fromRgbF(color[0], color[1], color[2], color[3]),
self.lvl_contour[i],
self.lvl_contour[i + 1],
)
)
else:
color = colortemp[0]
gradudation.append(
(
QtGui.QColor.fromRgbF(color[0], color[1], color[2], color[3]),
self.lvl_contour[0],
self.lvl_contour[1],
)
)
self.setGraduation(gradudation)
except Exception as e:
self.meshlayer.propertiesdialog.errorMessage("toggle graduation " + str(e))
if self.meshlayer.draw:
self.meshlayer.triggerRepaint()
def change_cm_vel(self, cm_raw):
"""
change_cm_vel
change the color map and layer symbology
"""
if False:
cm = self.colormanager.arrayStepRGBAToCmap(cm_raw)
self.cmap_mpl_vel, self.norm_mpl_vel, self.color_mpl_vel = self.colormanager.changeColorMap(
cm, self.lvl_vel
)
try:
qgis.utils.iface.legendInterface().refreshLayerSymbology(self.meshlayer)
except Exception as e:
# print('openglgetimage -change_cm_contour ' + str(e))
# self.meshlayer.propertiesdialog.errorMessage( 'openglgetimage -change_cm_contour ' + str(e) )
qgis.utils.iface.layerTreeView().refreshLayerSymbology(self.meshlayer.id())
# transparency - alpha changed
if self.color_mpl_vel != None:
colortemp = np.array(self.color_mpl_vel.tolist())
for i in range(len(colortemp)):
colortemp[i][3] = min(colortemp[i][3], self.alpha_displayed / 100.0)
# redefine cmap_mpl_contour and norm_mpl_contour :
self.cmap_mpl_vel, self.norm_mpl_vel = matplotlib.colors.from_levels_and_colors(self.lvl_vel, colortemp)
# repaint
if self.meshlayer.draw:
self.meshlayer.triggerRepaint()
else:
# print 'change vl'
self.cmap_vel_leveled = self.colormanager.fromColorrampAndLevels(self.lvl_vel, cm_raw)
if qgis.utils.iface is not None:
try:
qgis.utils.iface.legendInterface().refreshLayerSymbology(self.meshlayer)
except Exception as e:
# print('openglgetimage -change_cm_contour ' + str(e))
# self.meshlayer.propertiesdialog.errorMessage( 'openglgetimage -change_cm_contour ' + str(e) )
qgis.utils.iface.layerTreeView().refreshLayerSymbology(self.meshlayer.id())
# transparency - alpha changed
# if self.cmap_vel_leveled != None and len(self.lvl_vel) > 0:
if isinstance(self.cmap_vel_leveled, list) and len(self.lvl_vel) > 0:
colortemp = np.array(self.cmap_vel_leveled)
if False:
for i in range(len(colortemp)):
colortemp[i][3] = min(colortemp[i][3], self.alpha_displayed / 100.0)
# opengl
try:
gradudation = []
tempun = []
if len(self.lvl_vel) >= 3:
for i, color in enumerate(colortemp):
gradudation.append(
(
QtGui.QColor.fromRgbF(color[0], color[1], color[2], color[3]),
self.lvl_vel[i],
self.lvl_vel[i + 1],
)
)
else:
color = colortemp[0]
gradudation.append(
(
QtGui.QColor.fromRgbF(color[0], color[1], color[2], color[3]),
self.lvl_vel[0],
self.lvl_vel[1],
)
)
self.setGraduationVelocity(gradudation)
except Exception as e:
self.meshlayer.propertiesdialog.errorMessage("toggle graduation " + str(e))
if self.meshlayer.draw:
self.meshlayer.triggerRepaint()
# ************************************************************************************
# *************************************** Main func : getimage ******************************
# ************************************************************************************
def canvasPaned(self):
if QApplication.instance().thread() != QtCore.QThread.currentThread():
self.__img = None
# self.__imageChangeRequested.emit(rendererContext)
self.__imageChangeRequested.emit()
i = 0
while not self.__img and not self.rendererContext.renderingStopped() and i < self.timemax:
                # active wait to avoid deadlocking if event loop is stopped
                # this happens when a render job is cancelled
                i += 1
                QtCore.QThread.msleep(1)
if not self.rendererContext.renderingStopped():
# if not self.showmesh:
# painter.drawImage(0, 0, self.__img)
return (self.__img, None)
else:
self.__drawInMainThread()
# self.rendererContext.painter().drawImage(0, 0, self.__img)
return (self.__img, None)
def canvasChangedWithSameBBox(self):
if False and self.__vtxcanvas == None:
xMeshcanvas, yMeshcanvas, goodiklecanvas, self.goodpointindex = self.getCoordsIndexInCanvas(
self.meshlayer, self.rendererContext
)
# self.resetIdx(goodiklecanvas)
nodecoords = np.array([[xMeshcanvas[i], yMeshcanvas[i], 0.0] for i in range(len(xMeshcanvas))])
self.__vtxcanvas = numpy.require(nodecoords, numpy.float32, "F")
self.__idxcanvas = numpy.require(goodiklecanvas, numpy.int32, "F")
# self.resetFaceNodeCoord(nodecoords)
# self.meshadaptedtocanvas = True
# self.__vtxfacetodraw = self.__vtxcanvas
# self.__idxfacetodraw = self.__idxcanvas
if QApplication.instance().thread() != QtCore.QThread.currentThread():
self.__img = None
# self.__imageChangeRequested.emit(rendererContext)
self.__imageChangeRequested.emit()
i = 0
while not self.__img and not self.rendererContext.renderingStopped() and i < self.timemax:
                # active wait to avoid deadlocking if event loop is stopped
                # this happens when a render job is cancelled
                i += 1
                QtCore.QThread.msleep(1)
if not self.rendererContext.renderingStopped():
# if not self.showmesh:
# painter.drawImage(0, 0, self.__img)
return (self.__img, None)
else:
self.__drawInMainThread()
# self.rendererContext.painter().drawImage(0, 0, self.__img)
return (self.__img, None)
def canvasCreation(self):
# self.meshadaptedtocanvas = False
# self.resetFaceNodeCoord()
# self.resetIdx()
self.__vtxcanvas = None
self.__idxcanvas = None
self.goodpointindex = None
self.__vtxfacetodraw = self.__vtxfacetotal
self.__idxfacetodraw = self.__idxfacetotal
if QApplication.instance().thread() != QtCore.QThread.currentThread():
try:
self.__img = None
# self.__imageChangeRequested.emit(rendererContext)
self.__imageChangeRequested.emit()
i = 0
while not self.__img and not self.rendererContext.renderingStopped() and i < self.timemax:
# active wait to avoid deadlocking if event loop is stopped
                    # this happens when a render job is cancelled
i += 1
QtCore.QThread.msleep(1)
if not self.rendererContext.renderingStopped():
# if not self.showmesh:
# painter.drawImage(0, 0, self.__img)
# self.debugtext += ['img done : ' + str(round(time.clock()-self.timestart,3)) ]
return (self.__img, None)
except Exception as e:
print(str(e))
else:
self.__drawInMainThread()
# self.rendererContext.painter().drawImage(0, 0, self.__img)
return (self.__img, None)
def __drawInMainThread(self):
# print rendererContext
try:
self.__imageChangedMutex.lock()
includevel = True
if self.meshlayer.hydrauparser.parametres[self.meshlayer.param_displayed][2] == 0:
list1 = self.meshlayer.value
if self.meshlayer.hydrauparser.parametres[self.meshlayer.param_displayed][2] == 1:
if self.meshlayer.hydrauparser.parametrevx != None and self.meshlayer.hydrauparser.parametrevy != None:
list1 = np.stack(
(
self.meshlayer.value,
self.meshlayer.values[self.meshlayer.hydrauparser.parametrevx],
self.meshlayer.values[self.meshlayer.hydrauparser.parametrevy],
),
axis=-1,
)
else:
list1 = np.stack(
(
self.meshlayer.value,
np.array([0] * self.meshlayer.hydrauparser.facesnodescount),
np.array([0] * self.meshlayer.hydrauparser.facesnodescount),
),
axis=-1,
)
if True:
if self.goodpointindex != None:
list1 = list1[self.goodpointindex]
if self.meshlayer.hydrauparser.parametres[self.meshlayer.param_displayed][2] == 2:
list1 = self.meshlayer.value
self.__img = self.image(
list1,
self.sizepx,
# size,
(0.5 * (self.ext.xMinimum() + self.ext.xMaximum()), 0.5 * (self.ext.yMinimum() + self.ext.yMaximum())),
(
self.rendererContext.mapToPixel().mapUnitsPerPixel(),
self.rendererContext.mapToPixel().mapUnitsPerPixel(),
),
self.rendererContext.mapToPixel().mapRotation(),
)
self.__imageChangedMutex.unlock()
except Exception as e:
print("draw " + str(e))
# ************************************************************************************
# *************************************** Secondary func ******************************
# ************************************************************************************
def getVelocity(self, selafin, rendererContext):
tabx = []
taby = []
tabvx = []
tabvy = []
recttemp = rendererContext.extent()
rect = [
float(recttemp.xMinimum()),
float(recttemp.xMaximum()),
float(recttemp.yMinimum()),
float(recttemp.yMaximum()),
]
# print str(selafin.showvelocityparams)
if selafin.showvelocityparams["type"] in [0, 1]:
if selafin.showvelocityparams["type"] == 0:
nombrecalcul = selafin.showvelocityparams["step"]
pasespace = int((rect[1] - rect[0]) / nombrecalcul)
pasx = pasespace
pasy = pasespace
rect[0] = int(rect[0] / pasespace) * pasespace
rect[2] = int(rect[2] / pasespace) * pasespace
rangex = nombrecalcul + 3
rangey = nombrecalcul + 3
pasy = int((rect[3] - rect[2]) / nombrecalcul)
elif selafin.showvelocityparams["type"] == 1:
pasespace = selafin.showvelocityparams["step"]
pasx = pasespace
pasy = pasespace
rect[0] = int(rect[0] / pasespace) * pasespace
rect[2] = int(rect[2] / pasespace) * pasespace
rangex = int((rect[1] - rect[0]) / pasespace) + 3
rangey = int((rect[3] - rect[2]) / pasespace) + 3
x = np.arange(rect[0], rect[0] + rangex * pasx, pasx)
y = np.arange(rect[2], rect[2] + rangey * pasy, pasy)
mesh = np.meshgrid(x, y)
tabx = np.ravel(mesh[0].tolist())
taby = np.ravel(mesh[1].tolist())
if not selafin.triinterp:
selafin.initTriinterpolator()
"""
tabvx = selafin.triinterp[selafin.parametrevx].__call__(tabx,taby)
tabvy = selafin.triinterp[selafin.parametrevy].__call__(tabx,taby)
"""
tempx1, tempy1 = self.getTransformedCoords(tabx, taby, False)
tabvx = selafin.triinterp[selafin.hydrauparser.parametrevx].__call__(tempx1, tempy1)
tabvy = selafin.triinterp[selafin.hydrauparser.parametrevy].__call__(tempx1, tempy1)
elif selafin.showvelocityparams["type"] == 2:
            if self.goodpointindex is not None:
# tabx, taby = selafin.hydrauparser.getMesh()
"""
tabx = self.meshxreprojected
taby = self.meshyreprojected
"""
tabx = self.facenodereprojected[0]
taby = self.facenodereprojected[1]
goodnum = self.goodpointindex
tabx = tabx[goodnum]
taby = taby[goodnum]
else:
tabx, taby, goodnum = self.getxynuminrenderer(selafin, rendererContext)
tabvx = selafin.values[selafin.hydrauparser.parametrevx][goodnum]
tabvy = selafin.values[selafin.hydrauparser.parametrevy][goodnum]
return np.array(tabx), np.array(taby), np.array(tabvx), np.array(tabvy)
def getxynuminrenderer(self, selafin, rendererContext):
"""
Return index of selafin points in the visible canvas with corresponding x and y value
"""
recttemp = rendererContext.extent()
rect = [
float(recttemp.xMinimum()),
float(recttemp.xMaximum()),
float(recttemp.yMinimum()),
float(recttemp.yMaximum()),
]
"""
tabx, taby = selafin.hydrauparser.getMesh()
tabx, taby = self.getTransformedCoords(tabx,taby)
"""
"""
tabx = self.meshxreprojected
taby = self.meshyreprojected
"""
tabx = self.facenodereprojected[0]
taby = self.facenodereprojected[1]
valtabx = np.where(np.logical_and(tabx > rect[0], tabx < rect[1]))
valtaby = np.where(np.logical_and(taby > rect[2], taby < rect[3]))
goodnum = np.intersect1d(valtabx[0], valtaby[0])
tabx = tabx[goodnum]
taby = taby[goodnum]
# badnum = np.setxor1d(valtabx[0],valtaby[0])
return tabx, taby, goodnum
# **********************************************************************************************
# **********************************************************************************************
# **********************************************************************************************
# OPENGL
# **********************************************************************************************
# **********************************************************************************************
def __recompileNeeded(self):
self.__recompileShader = True
def __compileShaders(self):
vertex_shader = shaders.compileShader(
"""
varying float value;
varying float w;
varying vec3 normal;
varying vec4 ecPos;
void main()
{
ecPos = gl_ModelViewMatrix * gl_Vertex;
normal = normalize(gl_NormalMatrix * gl_Normal);
value = gl_MultiTexCoord0.st.x;
w = value > 0.0 ? 1.0 : 0.0;
                gl_Position = ftransform();
}
""",
GL_VERTEX_SHADER,
)
fragment_shader = shaders.compileShader(self._fragmentShader(), GL_FRAGMENT_SHADER)
self.__shaders = shaders.compileProgram(vertex_shader, fragment_shader)
# self.__legend._setUniformsLocation(self.__shaders)
self.__recompileShader = False
def toggleGraduation(self):
# self.__graduated = bool(flag)
self.__graduated = True
# print self.__graduation
if self.__graduated:
self.__pixelColor = "vec4 pixelColor(float value)\n{\n"
for c, min_, max_ in self.__graduation:
"""
self.__pixelColor += " if (float(%g) < value && value <= float(%g)) return vec4(%g, %g, %g, 1.);\n"%(
min_, max_, c.redF(), c.greenF(), c.blueF())
"""
self.__pixelColor += (
" if (float(%g) < value && value <= float(%g)) return vec4(%g, %g, %g, %g);\n"
% (min_, max_, c.redF(), c.greenF(), c.blueF(), c.alphaF())
)
self.__pixelColor += " return vec4(0., 0., 0., 0.);\n"
self.__pixelColor += "}\n"
else:
self.__pixelColor = ColorLegend.__pixelColorContinuous
# self.symbologyChanged.emit()
self.__recompileNeeded()
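        # For reference, one (color, min, max) entry such as
        # (pure red, 0.0, 0.5) yields a generated GLSL line roughly of the
        # form below (the values are an illustrative assumption, not from
        # the source):
        #   if (float(0) < value && value <= float(0.5)) return vec4(1, 0, 0, 1);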
def toggleGraduationVelocity(self):
# self.__graduated = bool(flag)
self.__graduated = True
# print self.__graduation
if self.__graduated:
self.__pixelColorVelocity = "vec4 pixelColor(float value)\n{\n"
for c, min_, max_ in self.__graduationvelocity:
"""
self.__pixelColor += " if (float(%g) < value && value <= float(%g)) return vec4(%g, %g, %g, 1.);\n"%(
min_, max_, c.redF(), c.greenF(), c.blueF())
"""
self.__pixelColorVelocity += (
" if (float(%g) < value && value <= float(%g)) return vec4(%g, %g, %g, %g);\n"
% (min_, max_, c.redF(), c.greenF(), c.blueF(), c.alphaF())
)
self.__pixelColorVelocity += " return vec4(0., 0., 0., 0.);\n"
self.__pixelColorVelocity += "}\n"
# print self.__pixelColorVelocity
else:
self.__pixelColorVelocity = ColorLegend.__pixelColorContinuous
# self.symbologyChanged.emit()
# self.__recompileNeeded()
def setGraduation(self, graduation):
"""graduation is a list of tuple (color, min, max) the alpha componant is not considered"""
self.__graduation = graduation
# print self.__graduation
self.toggleGraduation()
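    # Illustrative sketch (assumed values, not from the original source): a
    # graduation is a list of (QColor, min, max) tuples, one per legend class,
    # e.g. two classes covering values from 0.0 to 1.0:
    #   graduation = [(QtGui.QColor.fromRgbF(0.0, 0.0, 1.0, 0.8), 0.0, 0.5),
    #                 (QtGui.QColor.fromRgbF(1.0, 0.0, 0.0, 0.8), 0.5, 1.0)]
    #   renderer.setGraduation(graduation)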
def setGraduationVelocity(self, graduation):
"""graduation is a list of tuple (color, min, max) the alpha componant is not considered"""
self.__graduationvelocity = graduation
# print self.__graduation
self.toggleGraduationVelocity()
def _fragmentShader(self):
"""Return a string containing the definition of the GLSL pixel shader
vec4 pixelColor(float value)
This may contain global shader variables and should therefore
be included in the fragment shader between the global variables
definition and the main() declaration.
Note that:
varying float value
must be defined by the vertex shader
"""
return (
"""
varying float value;
varying float w;
varying vec3 normal;
varying vec4 ecPos;
uniform float transparency;
uniform float minValue;
uniform float maxValue;
uniform bool logscale;
uniform bool withNormals;
uniform sampler2D tex;
"""
+ self.__pixelColor
+ """
void main()
{
gl_FragColor = pixelColor(value);
}
"""
)
def __resize(self, roundupImageSize):
# QGLPixelBuffer size must be power of 2
assert roundupImageSize == roundUpSize(roundupImageSize)
# force alpha format, it should be the default,
# but isn't all the time (uninitialized)
if False:
fmt = QGLFormat()
fmt.setAlpha(True)
self.__pixBuf = QGLPixelBuffer(roundupImageSize, fmt)
assert self.__pixBuf.format().alpha()
self.__pixBuf.makeCurrent()
self.__pixBuf.bindToDynamicTexture(self.__pixBuf.generateDynamicTexture())
self.__compileShaders()
self.__pixBuf.doneCurrent()
self.__pixBufMesh = QGLPixelBuffer(roundupImageSize, fmt)
assert self.__pixBufMesh.format().alpha()
self.__pixBufMesh.makeCurrent()
self.__pixBufMesh.bindToDynamicTexture(self.__pixBufMesh.generateDynamicTexture())
self.__compileShaders()
self.__pixBufMesh.doneCurrent()
if True:
# self.surface = QOffscreenSurface()
self.surfaceFormat = QSurfaceFormat()
self.context = QOpenGLContext()
self.context.setFormat(self.surfaceFormat)
self.context.create()
self.surface = QOffscreenSurface()
self.surface.setFormat(self.surfaceFormat)
self.surface.create()
self.context.makeCurrent(self.surface)
self.__compileShaders()
if True:
fmt1 = QOpenGLFramebufferObjectFormat()
self.__pixBuf = QOpenGLFramebufferObject(roundupImageSize, fmt1)
self.__pixBuf.takeTexture()
self.__pixBuf.bind()
else:
self.__pixBuf = QOpenGLFramebufferObject(roundupImageSize)
self.context.doneCurrent()
def image(self, values, imageSize, center, mapUnitsPerPixel, rotation=0):
"""Return the rendered image of a given size for values defined at each vertex
or at each element depending on setColorPerElement.
Values are normalized using valueRange = (minValue, maxValue).
transparency is in the range [0,1]"""
DEBUGTIME = False
if DEBUGTIME:
self.debugtext = []
            self.timestart = time.perf_counter()  # time.clock() was removed in Python 3.8
if False:
if QApplication.instance().thread() != QThread.currentThread():
raise RuntimeError("trying to use gl draw calls in a thread")
try:
if not len(values):
img = QImage(imageSize, QImage.Format_ARGB32)
img.fill(Qt.transparent)
return img
roundupSz = roundUpSize(imageSize)
if (
not self.__pixBuf
or roundupSz.width() != self.__pixBuf.size().width()
or roundupSz.height() != self.__pixBuf.size().height()
):
# print('resize')
self.__resize(roundupSz)
val = numpy.require(values, numpy.float32) if not isinstance(values, numpy.ndarray) else values
if self.__colorPerElement:
val = numpy.concatenate((val, val, val))
# try:
# self.__pixBuf.makeCurrent()
# self.context.makeCurrent()
self.context.makeCurrent(self.surface)
if True:
# define current opengl drawing
# self.__pixBuf.makeCurrent()
# ?
if self.__recompileShader:
self.__compileShaders()
# init gl client
# glClearColor(1., 1., 1., 1.)
# glClearColor(0., 0., 0., 1.)
glClearColor(0.0, 0.0, 0.0, 0.0)
# tell OpenGL that the VBO contains an array of vertices
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_TEXTURE_COORD_ARRAY)
glEnable(GL_TEXTURE_2D)
if True:
# initialisation de la transparence
glEnable(GL_BLEND)
# la couleur de l'objet va etre (1-alpha_de_l_objet) * couleur du fond et (le_reste * couleur originale)
# glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBlendFunc(GL_SRC_ALPHA_SATURATE, GL_ONE)
else:
glDisable(GL_BLEND)
glEnable(GL_ALPHA_TEST)
glAlphaFunc(GL_GREATER, 0.1) # Or some fitting threshold for your texture
glShadeModel(GL_FLAT)
# clear the buffer
glClear(GL_COLOR_BUFFER_BIT)
# set orthographic projection (2D only)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# scale
if True:
glScalef(
2.0 / (roundupSz.width() * mapUnitsPerPixel[0]),
2.0 / (roundupSz.height() * mapUnitsPerPixel[1]),
1,
)
else:
glScalef(
2.0 / (roundupSz.height() * mapUnitsPerPixel[0]),
2.0 / (roundupSz.width() * mapUnitsPerPixel[1]),
1,
)
# rotate
glRotatef(-rotation, 0, 0, 1)
## translate
glTranslatef(-center[0], -center[1], 0)
glViewport(0, 0, roundupSz.width(), roundupSz.height())
if DEBUGTIME:
self.debugtext += ["init done : " + str(round(time.clock() - self.timestart, 3))]
if self.meshlayer.showmesh: # draw triangle contour but not inside
# Draw the object here
glDisable(GL_TEXTURE_2D)
glUseProgram(0)
if True:
glColor4f(0.2, 0.2, 0.2, 0.2)
glLineWidth(1) # or whatever
glPolygonMode(GL_FRONT, GL_LINE)
glPolygonMode(GL_BACK, GL_LINE)
# Draw the object here
glVertexPointerf(self.__vtxmesh)
glDrawElementsui(GL_LINES, self.__idxmesh)
# glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)
glPolygonMode(GL_FRONT, GL_FILL)
glPolygonMode(GL_BACK, GL_FILL)
if False:
glPointSize(20.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0, 0)
glVertex3f(center[0], center[1], 0)
glColor3f(0, 0, 1.0)
glVertex3f(self.rect[0], self.rect[2], 0)
glVertex3f(self.rect[1], self.rect[2], 0)
glVertex3f(self.rect[0], self.rect[3], 0)
glEnd()
if DEBUGTIME:
self.debugtext += ["mesh done : " + str(round(time.clock() - self.timestart, 3))]
if True:
if self.meshlayer.showvelocityparams["show"]:
# glDisable(GL_TEXTURE_2D)
glEnable(GL_PROGRAM_POINT_SIZE)
glEnable(GL_TEXTURE_2D)
# print self.__vtxfacetodraw
if True:
vertex_shader_vel = shaders.compileShader(
"""
#version 120
varying float valuev;
varying vec2 valuevel;
varying vec2 hw;
varying vec3 normal;
varying vec4 ecPos;
//out vec2 valuevel;
//out vec2 hw;
//out vec3 normal;
//out vec4 ecPos;
//varying float value ;
void main()
{
ecPos = gl_ModelViewMatrix * gl_Vertex;
normal = normalize(gl_NormalMatrix * gl_Normal);
//value = gl_MultiTexCoord0.st.x;
//valuev = gl_MultiTexCoord0.x;
valuevel = gl_MultiTexCoord0.yz;
//w = valuev > 0.0 ? 1.0 : 0.0;
//gl_Position = ftransform();
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
//gl_PointSize = 10.0;
}
""",
GL_VERTEX_SHADER,
)
if True:
"""
https://www.opengl.org/wiki/Geometry_Shader_Examples
"""
geom_shader_vel = shaders.compileShader(
"""
#version 150
uniform float mapunitsperpixel;
uniform float norm;
uniform vec2 hw[];
//varying in vec2 valuevel[];
//varying vec2 valuevel[];
in vec2 valuevel[];
//varying in vec2 valuevel;
out float value ;
//out varying value ;
//varying float value[] ;
layout(points) in;
//layout(line_strip, max_vertices = 2) out;
layout(triangle_strip, max_vertices = 9) out;
//layout(triangles, max_vertices = 6) out;
void main()
{
float normfinal ;
float valuedraw ;
//float value ;
//float normfinal = 1.0 ;
float headsize = 2.0 ;
//normfinal = norm ;
value = sqrt( valuevel[0].x * valuevel[0].x + valuevel[0].y * valuevel[0].y ) ;
if ( norm < 0.0 )
{
normfinal = - 1.0 / norm ;
valuedraw = value ;
}
else
{
normfinal = norm ;
valuedraw = 1.0 ;
}
vec4 center = gl_in[0].gl_Position ;
vec4 head = gl_in[0].gl_Position + vec4( valuevel[0].x / hw[0].x , valuevel[0].y / hw[0].y , 0.0, 0.0) / mapunitsperpixel / normfinal / valuedraw ;
vec4 arrowr = gl_in[0].gl_Position + vec4( valuevel[0].y / headsize / hw[0].x , - valuevel[0].x / headsize / hw[0].y , 0.0, 0.0) / mapunitsperpixel / normfinal / valuedraw ;
vec4 arrowl = gl_in[0].gl_Position + vec4(- valuevel[0].y / headsize / hw[0].x , valuevel[0].x / headsize / hw[0].y , 0.0, 0.0) / mapunitsperpixel / normfinal / valuedraw ;
vec4 base = gl_in[0].gl_Position * 2 - ( head) ;
vec4 baser = base + ( arrowr - head ) / 2 ;
vec4 basel = base + ( arrowl - head ) / 2 ;
gl_Position = arrowl ;
EmitVertex();
gl_Position = arrowr ;
EmitVertex();
gl_Position = head ;
EmitVertex();
EndPrimitive();
gl_Position = head ;
EmitVertex();
gl_Position = base ;
EmitVertex();
gl_Position = baser ;
EmitVertex();
EndPrimitive();
gl_Position = head ;
EmitVertex();
gl_Position = base ;
EmitVertex();
gl_Position = basel ;
EmitVertex();
EndPrimitive();
}
""",
GL_GEOMETRY_SHADER,
)
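                            # The geometry shader above expands each input point into a small arrow:
                            # `head` is the point offset by the (scaled) velocity, `base` its mirror,
                            # and `arrowl`/`arrowr` are the two sides of the arrow head; three
                            # triangles (9 emitted vertices) are produced: the head triangle plus two
                            # thin triangles forming the shaft between `head` and `base`.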
if True:
fragment_shader_vel = shaders.compileShader(
"""
#version 150
//varying float value;
//varying vec2 valuevel;
in float value;
in vec2 valuevel;
"""
+ self.__pixelColorVelocity
+ """
void main() {
//float valuetest ;
//valuetest = sqrt( valuevel.x * valuevel.x + valuevel.y * valuevel.y ) ;
//gl_FragColor = vec4( min( value ,1.0 ), 0.0, 0.0, 1.0);
gl_FragColor = pixelColor(value);
}
""",
GL_FRAGMENT_SHADER,
)
self.__shadersvel = shaders.compileProgram(
vertex_shader_vel, fragment_shader_vel, geom_shader_vel
)
# glDisableClientState(GL_TEXTURE_COORD_ARRAY)
# glClear(GL_COLOR_BUFFER_BIT)
glUseProgram(self.__shadersvel)
temp = glGetUniformLocation(self.__shadersvel, "mapunitsperpixel")
glUniform1f(temp, float(mapUnitsPerPixel[0]))
temp = glGetUniformLocation(self.__shadersvel, "norm")
glUniform1f(temp, float(self.meshlayer.showvelocityparams["norm"]))
temp = glGetUniformLocation(self.__shadersvel, "hw")
# glUniform2f( temp , float( imageSize.height() ) , float( imageSize.width() ) )
# glUniform2f( temp , float( imageSize.width() ) , float( imageSize.height() ) )
glUniform2f(temp, float(roundupSz.width()), float(roundupSz.height()))
# these vertices contain 2 single precision coordinates
glVertexPointerf(self.__vtxfacetodraw)
glTexCoordPointer(3, GL_FLOAT, 0, val)
glDrawArrays(GL_POINTS, 0, len(self.__vtxfacetodraw))
if DEBUGTIME:
self.debugtext += ["velocity done : " + str(round(time.clock() - self.timestart, 3))]
if self.meshlayer.hydrauparser.parametres[self.meshlayer.param_displayed][2] == 0:
try:
if False:
glEnable(GL_TEXTURE_2D)
glColor4f(0.2, 0.2, 0.2, 0.2)
glVertexPointerf(self.__vtxfacetodraw)
temp = np.array(sum(self.__idxfacetodraw.tolist(), []))
# print self.__idxfacetodraw.shape
# print self.__idxfacetodraw.flatten().shape
# glDrawElementsui( GL_TRIANGLE_FAN, self.__idxfacetodraw)
for elem in self.__idxfacetodraw:
if len(elem) > 2:
glDrawElementsui(GL_TRIANGLE_FAN, np.array(elem))
if False:
glUseProgram(0)
glDisable(GL_TEXTURE_2D)
glEnableClientState(GL_VERTEX_ARRAY)
# glEnable(GL_TEXTURE_2D)
glColor4f(0.2, 0.2, 0.2, 0.2)
# print self.__graduation
glVertexPointerf(self.__vtxfacetodraw)
print(len(self.__idxfacetodraw))
# print 'len ' + str( len(self.__idxfacetodraw) ) + ' ' + str(len(val))
# print self.__graduation
for i, elem in enumerate(self.__idxfacetodraw):
if len(elem) > 2:
if val[i] > self.__graduation[-1][2]:
continue
j = 0
while j < len(self.__graduation) and val[i] > self.__graduation[j][1]:
j += 1
j += -1
# print j
glColor4f(
self.__graduation[j][0].redF(),
self.__graduation[j][0].greenF(),
self.__graduation[j][0].blueF(),
self.__graduation[j][0].alphaF(),
)
# c.redF(), c.greenF(), c.blueF(), c.alphaF())
if True:
try:
# print str(i) + ' ' + str(elem)
glDrawElements(GL_TRIANGLE_FAN, len(elem), GL_UNSIGNED_BYTE, elem)
# print str(i) + ' ' + str(elem)
except Exception as e:
print(str(e))
if False:
glBegin(GL_TRIANGLE_FAN)
for id in elem:
glVertex2f(self.__vtxfacetodraw[id][0], self.__vtxfacetodraw[id][1])
glEnd()
if True:
if DEBUGTIME:
self.debugtext += [
"param render start : " + str(round(time.clock() - self.timestart, 3))
]
# glDisable(GL_TEXTURE_2D)
# glEnableClientState( GL_VERTEX_ARRAY )
glColor4f(0.2, 0.2, 0.2, 0.2)
if True:
# print 'vertex'
# vtx = self.__vtxfacetodraw[sum(self.__idxfacetodraw,[])]
vtx = self.__vtxfacetodraw[self.__idxfacetodraw1Darray]
if DEBUGTIME:
self.debugtext += [
"param render vertex interm : "
+ str(round(time.clock() - self.timestart, 3))
]
# print vtx
# print np.array(vtx).shape
# print str( vtx[0:10] )
glVertexPointerf(vtx)
if False:
glVertexPointerf(self.__vtxfacetodraw)
if DEBUGTIME:
self.debugtext += [
"param render vertex done : " + str(round(time.clock() - self.timestart, 3))
]
if False:
print(str(np.array(sum(self.__idxfacetodraw, [])).shape) + " " + str())
print(
str(self.__idxfacetotalcountidx[0:10])
+ " "
+ str(self.__idxfacetotalcountidx[-10:])
)
print(
str(self.__idxfacetotalcountlen[0:10])
+ " "
+ str(self.__idxfacetotalcountlen[-10:])
)
print(str(np.max(self.__idxfacetotalcountidx)))
print(self.__idxfacetotalcountidx[0:2])
print(self.__idxfacetotalcountlen[0:2])
if True:
# print 'render 1'
glDisable(GL_TEXTURE_2D)
glUseProgram(0)
# glEnableClientState(GL_COLOR_ARRAY)
glColor4f(0.2, 0.2, 0.2, 0.2)
"""
first = bounds[:-1]
count = np.diff(bounds)
primcount = len(bounds) - 1
gl.glMultiDrawArrays(primtype, first, count, primcount)
"""
# print 'render 2'
if DEBUGTIME:
self.debugtext += [
"param render color begin : " + str(round(time.clock() - self.timestart, 3))
]
colors = np.zeros((len(val), 4))
# colors = np.zeros((np.max(np.array(sum(self.__idxfacetodraw,[])))+1,4))
# print val.shape
# print self.__idxfacetodraw.shape
colors[:, :] = np.NAN
# print colors
for gradu in self.__graduation:
# temp = np.where(val > gradu[1])
# print np.where(np.logical_and(val > gradu[1], val < gradu[2]))
tempidx = np.where(np.logical_and(val > gradu[1], val < gradu[2]))
if len(tempidx) > 0:
# print np.array([gradu[0].redF() , gradu[0].greenF() , gradu[0].blueF() ,gradu[0].alphaF() ])
colors[tempidx] = [
gradu[0].redF(),
gradu[0].greenF(),
gradu[0].blueF(),
gradu[0].alphaF(),
]
# colors[np.logical_and(val > gradu[1], val < gradu[2])] = np.array([gradu[0].redF() , gradu[0].greenF() , gradu[0].blueF() ,gradu[0].alphaF() ])
# self.__graduation[j][0].redF(), self.__graduation[j][0].greenF(), self.__graduation[j][0].blueF() , self.__graduation[j][0].alphaF()
# print colors
                                # NaN == NaN is always False, so use isnan() to reset the rows
                                # that did not fall into any graduation class
                                colors[np.isnan(colors[:, 0])] = np.array([0.0, 0.0, 0.0, 0.0])
# print colors.shape
# print np.max(np.array(sum(self.__idxfacetodraw,[])))
# colors2 = colors[sum(self.__idxfacetodraw,[])]
if DEBUGTIME:
self.debugtext += [
"param render color end : " + str(round(time.clock() - self.timestart, 3))
]
# print 'render 3'
first = self.__idxfacetotalcountidx[:-1]
count = np.diff(self.__idxfacetotalcountidx)
primcount = len(self.__idxfacetotalcountidx) - 1
if DEBUGTIME:
self.debugtext += [
"param render first count end : "
+ str(round(time.clock() - self.timestart, 3))
]
if False:
trueidx = np.where(count > 2)
first = first[trueidx]
count = count[trueidx]
primcount = len(first)
# print '3bis'
colors2 = np.repeat(colors, count, axis=0)
# print colors2.shape
# print vtx.shape
if DEBUGTIME:
self.debugtext += [
"param render first colorpointer begin : "
+ str(round(time.clock() - self.timestart, 3))
]
if True:
glEnableClientState(GL_COLOR_ARRAY)
# glColorPointerf(colors2)
glColorPointer(4, GL_FLOAT, 0, colors2)
if DEBUGTIME:
self.debugtext += [
"param render first colorpointer end : "
+ str(round(time.clock() - self.timestart, 3))
]
# colors = colors[trueidx]
# print colors
# print str(first[0:10]) + ' ' +str(first[-10:]) + ' ' + str(len(first))
# print str(count[0:10])+ ' ' +str(count[-10:])+ ' ' + str(len(count))
# print str( primcount )
# print count[0]
# print self.__idxfacetodraw[0:count]
# idxtemp = np.array(sum(self.__idxfacetodraw,[]) )
# print idxtemp
# print 'render 4'
glMultiDrawArrays(GL_TRIANGLE_FAN, first, count, primcount)
if DEBUGTIME:
self.debugtext += [
"param render first draw array end : "
+ str(round(time.clock() - self.timestart, 3))
]
# glMultiDrawElements(GL_TRIANGLE_FAN, count[0], GL_UNSIGNED_BYTE, self.__idxfacetodraw, 1 )
# glMultiDrawElements(GL_TRIANGLE_FAN, count, GL_UNSIGNED_BYTE, idxtemp, 10 )
# print 'render 5'
glDisableClientState(GL_COLOR_ARRAY)
if False:
glUseProgram(0)
glEnable(GL_PRIMITIVE_RESTART)
glPrimitiveRestartIndex(99999)
if False and self.setprimitive:
glPrimitiveRestartIndex(-1)
self.setprimitive = False
temp = []
for i, elem in enumerate(self.__idxfacetodraw):
if len(elem) > 2:
if i > 0:
temp1 = np.array(elem).tolist()
temp1.insert(0, 99999)
temp.append(temp1)
else:
temp1 = np.array(elem).tolist()
temp.append(temp1)
idx1 = np.array(sum(temp, []))
print(self.__idxfacetodraw[0:20])
print(idx1[0:20])
print(self.__vtxfacetodraw[0:4])
# glDrawElements(GL_TRIANGLE_FAN, 20, GL_UNSIGNED_INT, idx1)
glDrawElements(GL_TRIANGLE_FAN, 5, GL_UNSIGNED_BYTE, idx1)
# glMultiDrawArrays(GL_TRIANGLE_FAN, self.__idxfacetotalcountidx[0:2], self.__idxfacetotalcountlen[0:2], 2) #; // 2 fans
except Exception as e:
print("face elem rendering " + str(e))
elif self.meshlayer.hydrauparser.parametres[self.meshlayer.param_displayed][2] == 1:
glEnable(GL_TEXTURE_2D)
glUseProgram(self.__shaders)
# self.__legend._setUniforms(self.__pixBuf)
# these vertices contain 2 single precision coordinates
glVertexPointerf(self.__vtxfacetodraw)
glTexCoordPointer(3, GL_FLOAT, 0, val)
glDrawElementsui(GL_TRIANGLES, self.__idxfacetodraw)
elif self.meshlayer.hydrauparser.parametres[self.meshlayer.param_displayed][2] == 2:
try:
if True:
"""
self.__vtxfacetodraw = self.__vtxfacetotal
self.__idxfacetodraw = self.__idxfacetotal
self.__idxfaceonlytodraw = self.__idxfaceonlytotal
self.__idxfacetodraw1Darray = np.concatenate(self.__idxfacetodraw)
self.__idxfaceonlytodraw1Darray = np.concatenate(self.__idxfaceonlytodraw)
"""
if DEBUGTIME:
self.debugtext += [
"param render start : " + str(round(time.clock() - self.timestart, 3))
]
# glDisable(GL_TEXTURE_2D)
# glEnableClientState( GL_VERTEX_ARRAY )
glColor4f(0.2, 0.2, 0.2, 0.2)
if True:
vtx = self.__vtxfacetodraw[self.__idxfaceonlytodraw1Darray]
if DEBUGTIME:
self.debugtext += [
"param render vertex interm : "
+ str(round(time.clock() - self.timestart, 3))
]
# print vtx
# print np.array(vtx).shape
# print str( vtx[0:10] )
glVertexPointerf(vtx)
# print 'vtxshape ' + str(vtx.shape)
if DEBUGTIME:
self.debugtext += [
"param render vertex done : " + str(round(time.clock() - self.timestart, 3))
]
if False:
print(str(np.array(sum(self.__idxfacetodraw, [])).shape) + " " + str())
print(
str(self.__idxfacetotalcountidx[0:10])
+ " "
+ str(self.__idxfacetotalcountidx[-10:])
)
print(
str(self.__idxfacetotalcountlen[0:10])
+ " "
+ str(self.__idxfacetotalcountlen[-10:])
)
print(str(np.max(self.__idxfacetotalcountidx)))
print(self.__idxfacetotalcountidx[0:2])
print(self.__idxfacetotalcountlen[0:2])
if True:
# print 'render 1'
glDisable(GL_TEXTURE_2D)
glUseProgram(0)
# glEnableClientState(GL_COLOR_ARRAY)
glColor4f(0.2, 0.2, 0.2, 0.2)
"""
first = bounds[:-1]
count = np.diff(bounds)
primcount = len(bounds) - 1
gl.glMultiDrawArrays(primtype, first, count, primcount)
"""
# print 'render 2'
if DEBUGTIME:
self.debugtext += [
"param render color begin : " + str(round(time.clock() - self.timestart, 3))
]
colors = np.zeros((len(val), 4))
# colors = np.zeros((np.max(np.array(sum(self.__idxfacetodraw,[])))+1,4))
# print val.shape
# print self.__idxfacetodraw.shape
colors[:, :] = np.NAN
# print colors
for gradu in self.__graduation:
# temp = np.where(val > gradu[1])
# print np.where(np.logical_and(val > gradu[1], val < gradu[2]))
tempidx = np.where(np.logical_and(val > gradu[1], val < gradu[2]))
if len(tempidx) > 0:
# print np.array([gradu[0].redF() , gradu[0].greenF() , gradu[0].blueF() ,gradu[0].alphaF() ])
colors[tempidx] = [
gradu[0].redF(),
gradu[0].greenF(),
gradu[0].blueF(),
gradu[0].alphaF(),
]
# colors[np.logical_and(val > gradu[1], val < gradu[2])] = np.array([gradu[0].redF() , gradu[0].greenF() , gradu[0].blueF() ,gradu[0].alphaF() ])
# self.__graduation[j][0].redF(), self.__graduation[j][0].greenF(), self.__graduation[j][0].blueF() , self.__graduation[j][0].alphaF()
# print colors
                                # NaN == NaN is always False, so use isnan() to reset the rows
                                # that did not fall into any graduation class
                                colors[np.isnan(colors[:, 0])] = np.array([0.0, 0.0, 0.0, 0.0])
# print colors.shape
# print np.max(np.array(sum(self.__idxfacetodraw,[])))
# colors2 = colors[sum(self.__idxfacetodraw,[])]
if DEBUGTIME:
self.debugtext += [
"param render color end : " + str(round(time.clock() - self.timestart, 3))
]
# print 'render 3'
# first = self.__idxfacetotalcountidx[:-1]
# count = np.diff(self.__idxfacetotalcountidx)
# primcount = len(self.__idxfacetotalcountidx) - 1
if DEBUGTIME:
self.debugtext += [
"param render first count end : "
+ str(round(time.clock() - self.timestart, 3))
]
# print '3bis'
colors2 = np.repeat(colors, 2, axis=0)
# print colors2.shape
# print vtx.shape
if DEBUGTIME:
self.debugtext += [
"param render first colorpointer begin : "
+ str(round(time.clock() - self.timestart, 3))
]
if True:
glEnableClientState(GL_COLOR_ARRAY)
# glColorPointerf(colors2)
glColorPointer(4, GL_FLOAT, 0, colors2)
if DEBUGTIME:
self.debugtext += [
"param render first colorpointer end : "
+ str(round(time.clock() - self.timestart, 3))
]
# colors = colors[trueidx]
# print colors
# print str(first[0:10]) + ' ' +str(first[-10:]) + ' ' + str(len(first))
# print str(count[0:10])+ ' ' +str(count[-10:])+ ' ' + str(len(count))
# print str( primcount )
# print count[0]
# print self.__idxfacetodraw[0:count]
# idxtemp = np.array(sum(self.__idxfacetodraw,[]) )
# print idxtemp
# print 'render 4'
# print 'draw'
glLineWidth(5) # or whatever
glDrawArrays(GL_LINES, 0, len(vtx))
# print 'draw2'
# glDrawArrays(GL_POINTS, 0, len(self.__vtxfacetodraw))
if DEBUGTIME:
self.debugtext += [
"param render first draw array end : "
+ str(round(time.clock() - self.timestart, 3))
]
# glMultiDrawElements(GL_TRIANGLE_FAN, count[0], GL_UNSIGNED_BYTE, self.__idxfacetodraw, 1 )
# glMultiDrawElements(GL_TRIANGLE_FAN, count, GL_UNSIGNED_BYTE, idxtemp, 10 )
# print 'render 5'
glDisableClientState(GL_COLOR_ARRAY)
except Exception as e:
print("face elem rendering " + str(e))
if DEBUGTIME:
self.debugtext += ["param done : " + str(round(time.clock() - self.timestart, 3))]
else:
self.doRenderWork(val, imageSize, center, mapUnitsPerPixel, rotation)
img = self.__pixBuf.toImage()
# self.__pixBuf.doneCurrent()
self.context.doneCurrent()
if DEBUGTIME:
self.debugtext += ["image done : " + str(round(time.clock() - self.timestart, 3))]
if DEBUGTIME:
self.meshlayer.propertiesdialog.textBrowser_2.append(str(self.debugtext))
"""
if False:
tempcopy = img.copy( 0,
0,
imageSize.width(), imageSize.height())
if True:
tempcopy = img.copy( .5*(roundupSz.width()-imageSize.width()),
.5*(roundupSz.height()-imageSize.height()),
imageSize.width(), imageSize.height())
if False:
tempcopy = img.copy( -imageSize.width()/2.,
-imageSize.height()/2.,
imageSize.width(), imageSize.height())
if False:
tempcopy = img.copy( 1.0*(roundupSz.width()-imageSize.width()),
1.0*(roundupSz.height()-imageSize.height()),
imageSize.width(), imageSize.height())
if False:
tempcopy.setDotsPerMeterX(int(self.dpi*39.3701))
tempcopy.setDotsPerMeterY(int(self.dpi*39.3701))
return img.copy( .5*(roundupSz.width()-imageSize.width()),
.5*(roundupSz.height()-imageSize.height()),
imageSize.width(), imageSize.height())
if False:
self.meshlayer.propertiesdialog.textBrowser_2.append(str('Rendering report ***********************************'))
self.meshlayer.propertiesdialog.textBrowser_2.append(str('raw image size px : ') + str(img.size()))
self.meshlayer.propertiesdialog.textBrowser_2.append(str('image size px : ') + str(tempcopy.size()))
self.meshlayer.propertiesdialog.textBrowser_2.append(str('pixbuff size px : ') + str(self.__pixBuf.size()))
self.meshlayer.propertiesdialog.textBrowser_2.append(str('roundupSz size px : ') + str( roundupSz.width() ) +' ' +str(roundupSz.height()) )
self.meshlayer.propertiesdialog.textBrowser_2.append(str('decoup pixbuff : ') + str(.5*(roundupSz.width()-imageSize.width()))
+' ' +str(.5*(roundupSz.height()-imageSize.height()) ) +' ' +str(imageSize.width())
+' ' +str(imageSize.height()) )
self.meshlayer.propertiesdialog.textBrowser_2.append(str('bbox : ') + str(self.rect))
"""
if True:
return img
"""
if False:
return tempcopy
if False:
img = QImage(QSize(imageSize.width()-10, imageSize.height()-10), QImage.Format_ARGB32)
img.fill(Qt.blue)
print('sizepx',self.sizepx)
print(img.size())
return img
"""
except Exception as e:
print(str(e))
return QImage()
| gpl-3.0 |
depet/scikit-learn | sklearn/externals/joblib/parallel.py | 6 | 21763 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
import os
import sys
import warnings
from collections import Sized
from math import sqrt
import functools
import time
import threading
import itertools
try:
import cPickle as pickle
except:
import pickle
# Obtain possible configuration from the environment, assuming 1 (on)
# by default, upon 0 set to None. Should instructively fail if some non
# 0/1 value is set.
multiprocessing = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
if multiprocessing:
try:
import multiprocessing
except ImportError:
multiprocessing = None
# 2nd stage: validate that locking is available on the system and
# issue a warning if not
if multiprocessing:
try:
_sem = multiprocessing.Semaphore()
del _sem # cleanup
except (ImportError, OSError) as e:
multiprocessing = None
warnings.warn('%s. joblib will operate in serial mode' % (e,))
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
###############################################################################
# CPU that works also when multiprocessing is not installed (python2.5)
def cpu_count():
""" Return the number of CPUs.
"""
if multiprocessing is None:
return 1
return multiprocessing.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
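# For instance, with verbose=5 the filter returns False (i.e. a progress message is
# emitted) at indices 0, 17, 71, 161, ..., roughly quadratically spaced, while any
# verbose > 10 reports every index.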
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
###############################################################################
def delayed(function):
""" Decorator used to capture the arguments of a function.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
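# Minimal usage sketch of `delayed` (the demo helper below is illustrative and not
# part of the original module): `delayed` only records the call, it does not run it.
def _demo_delayed():
    from math import sqrt
    func, args, kwargs = delayed(sqrt)(9)
    assert func is sqrt and args == (9,) and kwargs == {}
    # Parallel(n_jobs=2)(delayed(sqrt)(i) for i in range(4)) later unpacks such
    # triples and dispatches them to the worker pool.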
###############################################################################
class ImmediateApply(object):
""" A non-delayed apply function.
"""
def __init__(self, func, args, kwargs):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = func(*args, **kwargs)
def get(self):
return self.results
###############################################################################
class CallBack(object):
""" Callback used by parallel: it is used for progress reporting, and
to add data to be processed
"""
def __init__(self, index, parallel):
self.parallel = parallel
self.index = index
def __call__(self, out):
self.parallel.print_progress(self.index)
if self.parallel._iterable:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int
The number of jobs to use for the computation. If -1 all CPUs
are used. If 1 is given, no parallel computing code is used
at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
            If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The amount of jobs to be pre-dispatched. Default is 'all',
but it may be memory consuming, for instance if each job
            involves a lot of data.
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, verbose=0, pre_dispatch='all'):
self.verbose = verbose
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self._pool = None
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def dispatch(self, func, args, kwargs):
""" Queue the function for computing, with or without multiprocessing
"""
if self._pool is None:
job = ImmediateApply(func, args, kwargs)
index = len(self._jobs)
if not _verbosity_filter(index, self.verbose):
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(time.time() - self._start_time)
))
self._jobs.append(job)
self.n_dispatched += 1
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
try:
self._lock.acquire()
job = self._pool.apply_async(SafeFunction(func), args,
kwargs, callback=CallBack(self.n_dispatched, self))
self._jobs.append(job)
self.n_dispatched += 1
except AssertionError:
print('[Parallel] Pool seems closed')
finally:
self._lock.release()
def dispatch_next(self):
""" Dispatch more data for parallel processing
"""
self._dispatch_amount += 1
while self._dispatch_amount:
try:
# XXX: possible race condition shuffling the order of
# dispatches in the next two lines.
func, args, kwargs = next(self._iterable)
self.dispatch(func, args, kwargs)
self._dispatch_amount -= 1
except ValueError:
""" Race condition in accessing a generator, we skip,
the dispatch will be done later.
"""
except StopIteration:
self._iterable = None
return
def _print(self, msg, msg_args):
""" Display the message on stout or stderr depending on verbosity
"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self, index):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print only 'verbose' times a messages
# The challenge is that we may not know the queue length
if self._iterable:
if _verbosity_filter(index, self.verbose):
return
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(elapsed_time),
))
else:
# We are finished dispatching
queue_length = self.n_dispatched
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (queue_length - index + 1
- self._pre_dispatch_amount)
frequency = (queue_length // self.verbose) + 1
is_last_item = (index + 1 == queue_length)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
queue_length,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._jobs:
# We need to be careful: the job queue can be filling up as
# we empty it
if hasattr(self, '_lock'):
self._lock.acquire()
job = self._jobs.pop(0)
if hasattr(self, '_lock'):
self._lock.release()
try:
self._output.append(job.get())
except tuple(self.exceptions) as exception:
try:
self._aborting = True
self._lock.acquire()
if isinstance(exception,
(KeyboardInterrupt, WorkerInterrupt)):
# We have captured a user interruption, clean up
# everything
if hasattr(self, '_pool'):
self._pool.close()
self._pool.terminate()
# We can now allow subprocesses again
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
raise exception
elif isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
this_report,
exception.message,
)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
raise exception_type(report)
raise exception
finally:
self._lock.release()
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
if n_jobs < 0 and multiprocessing is not None:
n_jobs = max(multiprocessing.cpu_count() + 1 + n_jobs, 1)
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs is None or multiprocessing is None or n_jobs == 1:
n_jobs = 1
self._pool = None
else:
if multiprocessing.current_process()._daemonic:
# Daemonic processes cannot have children
n_jobs = 1
self._pool = None
warnings.warn(
'Parallel loops cannot be nested, setting n_jobs=1',
stacklevel=2)
else:
already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing'
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
                        'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ['__JOBLIB_SPAWNED_PARALLEL__'] = '1'
self._pool = multiprocessing.Pool(n_jobs)
self._lock = threading.Lock()
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
pre_dispatch = self.pre_dispatch
if isinstance(iterable, Sized):
# We are given a sized (an object with len). No need to be lazy.
pre_dispatch = 'all'
if pre_dispatch == 'all' or n_jobs == 1:
self._iterable = None
self._pre_dispatch_amount = 0
else:
self._iterable = iterable
self._dispatch_amount = 0
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
iterable = itertools.islice(iterable, pre_dispatch)
self._start_time = time.time()
self.n_dispatched = 0
try:
for function, args, kwargs in iterable:
self.dispatch(function, args, kwargs)
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output),
len(self._output),
short_format_time(elapsed_time)
))
finally:
if n_jobs > 1:
self._pool.close()
self._pool.join()
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
TaxIPP-Life/Til | til/data/DataTil.py | 1 | 26640 | # -*- coding:utf-8 -*-
'''
Created on 22 juil. 2013
Alexis Eidelman
'''
# TODO: duplicate the table before the parent-child matching so that the 'hod' values are not dragged along in the duplication.
import numpy as np
import os
import tables
from pandas import merge, notnull, DataFrame, concat, HDFStore
import pkg_resources
import pdb
from til.data.utils.utils import replicate, new_link_with_men, of_name_to_til, new_idmen, count_dup
path_model = os.path.join(
pkg_resources.get_distribution("Til-BaseModel").location,
"til_base_model",
)
# Dictionary of the variables, consistent with the model's imports.
# It has to be kept up to date. The first element is the list of
# integer variables, the second the list of floats
variables_til = {'ind': (['agem','sexe','men','quimen','foy','quifoy','tuteur',
'pere','mere','partner','civilstate','findet',
'workstate','xpr','anc'],['sali','rsti','choi', 'tauxprime']),
'men': (['pref'],[]),
'foy': (['vous','men'],[]),
'futur':(['agem','sexe','men','quimen','foy','quifoy',
'pere','mere','partner','civilstate','findet',
'workstate','xpr','anc', 'deces'],['sali','rsti','choi']),
'past': ([],[])}
class DataTil(object):
"""
La classe qui permet de lancer le travail sur les données
La structure de classe n'est peut-être pas nécessaire pour l'instant
"""
def __init__(self):
self.name = None
self.survey_date = None
self.ind = None
self.men = None
self.foy = None
self.futur = None
self.past = None
self.longitudinal = {}
self.child_out_of_house = None
self.seuil= None
        # TODO: write a function that checks where we stand and whether the previous steps were properly done, etc.
self.done = []
self.order = []
def load(self):
print "début de l'importation des données"
raise NotImplementedError()
print "fin de l'importation des données"
#def rename_var(self, [pe1e, me1e]):
# TODO : fonction qui renomme les variables pour qu'elles soient au format liam
# period, id, agem, age, sexe, men, quimen, foy quifoy pere, mere, partner, dur_in_couple, civilstate, workstate, sali, findet
def drop_variable(self, dict_to_drop=None, option='white'):
'''
        - If dict_to_drop is not None, it must have the form table: [list of variables];
        the listed variables are then removed from the named table.
        - Otherwise, this method is used for the first clean-up of the data, with
        two options:
            - use a white list, which is what we recommend for now
            - use a black list.
'''
if 'ind' in dict_to_drop.keys():
self.ind = self.ind.drop(dict_to_drop['ind'], axis=1)
if 'men' in dict_to_drop.keys():
self.men = self.men.drop(dict_to_drop['men'], axis=1)
if 'foy' in dict_to_drop.keys():
self.foy = self.foy.drop(dict_to_drop['foy'], axis=1)
def format_initial(self):
raise NotImplementedError()
def enfants(self):
'''
Calcule l'identifiant des parents
'''
raise NotImplementedError()
def table_initial(self):
raise NotImplementedError()
def creation_foy(self):
'''
        Create the tax units ("déclarations fiscales"). This mainly consists in grouping some individuals together.
        It is only here that we check that married or pacsed individuals have the same marital status as their legal partner.
        This cannot be done right from the start because the partner's identifier is needed.
'''
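        # Reminder of the quifoy codes assigned below: 0 = declarant ("vous"),
        # 1 = spouse/partner ("conjoint"), 2 = dependant ("personne a charge").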
ind = self.ind
men = self.men
survey_date = self.survey_date
print ("Creation des declarations fiscales")
        # Step 0: create the 'nb_enf' variable if it does not exist + add 'lienpref'
if 'nb_enf' not in ind.columns:
## nb d'enfant
ind.index = ind['id']
nb_enf_mere = ind.groupby('mere').size()
nb_enf_pere = ind.groupby('pere').size()
# On assemble le nombre d'enfants pour les peres et meres en enlevant les manquantes ( = -1)
enf_tot = concat([nb_enf_mere, nb_enf_pere], axis=0)
enf_tot = enf_tot.drop([-1])
ind['nb_enf'] = 0
ind['nb_enf'][enf_tot.index] = enf_tot.values
def _name_var(ind, men):
if 'lienpref' in ind.columns :
ind['quimen'] = ind['lienpref']
ind.loc[ind['quimen'] >1 , 'quimen'] = 2
# a changer avec values quand le probleme d'identifiant et résolu .values
men['pref'] = ind.loc[ind['lienpref']==0,'id'].values
return men, ind
men, ind = _name_var(ind, men)
        # Step 1: identify married or pacsed individuals
spouse = (ind['partner'] != -1) & ind['civilstate'].isin([1,5])
print str(sum(spouse)) + " personnes en couples"
        # Step 2: roles within the tax unit
        # choose which partner will be the declarant (main filer of the tax unit): no impact in theory
decl = spouse & ( ind['partner'] > ind['id'])
partner = spouse & ( ind['partner'] < ind['id'])
        # Identify the dependants (under 21, or under 25 if a student)
        # note: one can only be a dependant if one is not a parent oneself
pac_condition = (ind['civilstate'] == 2) & ( ((ind['agem'] < 12*25) & \
(ind['workstate'] == 11)) | (ind['agem'] < 12*21) ) &(ind['nb_enf'] == 0)
pac = ((ind['pere'] != -1) | (ind['mere'] != -1)) & pac_condition
print str(sum(pac)) + ' personnes prises en charge'
# Identifiants associés
ind['quifoy'] = 0
ind.loc[partner,'quifoy'] = 1
# Comprend les enfants n'ayant pas de parents spécifiés (à terme rattachés au foyer 0= DASS)
ind.loc[pac,'quifoy'] = 2
ind.loc[(ind['men'] == 0) & (ind['quifoy'] == 0), 'quifoy'] = 2
print "Nombres de foyers fiscaux", sum(ind['quifoy'] == 0), ", dont couple", sum(ind['quifoy'] == 1)
        # Step 3: assign the tax unit identifiers
ind['foy'] = -1
nb_foy = sum(ind['quifoy'] == 0)
print "Le nombre de foyers créés est : " + str(nb_foy)
        # Note: same offset as for the households (first 10 ids: institutions)
ind.loc[ind['quifoy'] == 0, 'foy'] = range(10, nb_foy +10)
        # Step 4: attach the other household members
        # (a) - attach the partners of people in couples
partner = ind.loc[(ind['partner'] != -1) & (ind['civilstate'].isin([1,5]))& (ind['quifoy'] == 0), ['partner','foy']]
ind['foy'][partner['partner'].values] = partner['foy'].values
        # (b) - attach their children (by priority on the father's declaration)
for parent in ['pere', 'mere']:
pac_par = ind.loc[ (ind['quifoy'] == 2) & (ind[parent] != -1) & (ind['foy'] == -1), ['id', parent]].astype(int)
ind['foy'][pac_par['id'].values] = ind['foy'][pac_par[parent].values]
print str(len(pac_par)) + " enfants sur la déclaration de leur " + parent
        # Children in care (DASS) -> 'collective' tax unit
ind.loc[ind['men']==0, 'foy'] = 0
        # Step 5: create the foy table
vous = (ind['quifoy'] == 0) & (ind['foy'] > 9)
foy = ind.loc[vous,['foy', 'id', 'men']]
foy = foy.rename(columns={'foy': 'id', 'id': 'vous'})
        # Step specific to the Patrimoine survey
impots = ['zcsgcrds','zfoncier','zimpot', 'zpenaliv','zpenalir','zpsocm','zrevfin']
var_to_declar = impots + ['pond', 'id', 'pref']
foy_men = men.loc[men['pref'].isin(foy['vous']), var_to_declar].fillna(0)
foy_men = foy_men.rename(columns = {'id' : 'men'})
        # assumption: the amounts are split equally between the declarations: debatable
nb_foy_men = foy.loc[foy['men'].isin(foy_men['men'].values)].groupby('men').size()
if (nb_foy_men.max() >1) & (foy_men ['zimpot'].max() >0) :
assert len(nb_foy_men) == len(foy_men)
for var in impots :
foy_men[var] = foy_men[var] / nb_foy_men
foy = merge(foy, foy_men, on = 'men', how ='left', right_index=True)
foy['period'] = survey_date
        # Add the 'communities' to the foy table
for k in [0]:
if sum(ind['foy'] == k) !=0 :
to_add = DataFrame([np.zeros(len(foy.columns))], columns = foy.columns)
to_add['id'] = k
to_add['vous'] = -1
to_add['period'] = survey_date
foy = concat([foy, to_add], axis = 0, ignore_index=True)
foy.index = foy['id']
assert sum(ind['foy']==-1) == 0
print 'Taille de la table foyers :', len(foy)
#### fin de declar
self.ind = ind
self.foy = foy
print("fin de la creation des declarations")
def creation_child_out_of_house(self):
'''
        Work on the parent-child links.
        We first look at the variables that are useful for the matching
'''
raise NotImplementedError()
def matching_par_enf(self):
'''
        Matching of the parents and of the children living outside the household
'''
raise NotImplementedError()
def match_couple_hdom(self):
'''
        Some individuals declare being in a couple with someone who does not live in the dwelling; these unions are rebuilt here.
        This step can be seen as a closure of the sample.
        We select the individuals who declare being in a couple with someone outside the household.
        Married/pacsed people are matched on one side, people without a contract on the other. In other words, if no partner is found for a married or pacsed person, their couple status is changed.
        As for the parent-child links, we neglect here the possibility that the partner is out of scope (abroad, prison, barracks, etc.).
        Also computes the variable ind['nb_enf']
'''
raise NotImplementedError()
def expand_data(self, seuil=150, nb_ligne=None):
#TODO: add future and past
'''
        Note: must not be run after the parent-child linking.
        However child_out_of_house must already have been created because it is used for the replication
'''
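        # Worked example of the reweighting below (illustrative numbers, assuming the
        # smallest weight is below `seuil` so that target_pond == seuil == 150):
        # a household with pond=450 gets nb_rep = round(450/150) = 3 replicas, each of
        # weight 150; a household with pond=80 is first raised to 150 and kept as a
        # single replica of weight 150.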
self.seuil = seuil
if seuil != 0 and nb_ligne is not None:
raise Exception("On ne peut pas à la fois avoir un nombre de ligne désiré et une valeur" \
"qui va determiner le nombre de ligne")
#TODO: on peut prendre le min des deux quand même...
men = self.men
ind = self.ind
foy = self.foy
par = self.child_out_of_house
longit = self.longitudinal
if par is None:
print("Notez qu'il est plus malin d'étendre l'échantillon après avoir fait les tables " \
"child_out_of_house plutôt que de les faire à partir des tables déjà étendue")
if foy is None:
print("C'est en principe plus efficace d'étendre après la création de la table foyer" \
" mais si on veut rattacher les enfants (par exemple de 22 ans) qui ne vivent pas au" \
" domicile des parents sur leur déclaration, il faut faire l'extension et la " \
" fermeture de l'échantillon d'abord. Pareil pour les couples. ")
min_pond = min(men['pond'])
target_pond = float(max(min_pond, seuil))
# 1 - Réhaussement des pondérations inférieures à la pondération cible
men['pond'][men['pond']<target_pond] = target_pond
# 2 - Calcul du nombre de réplications à effectuer
men['nb_rep'] = men['pond'].div(target_pond)
men['nb_rep'] = men['nb_rep'].round()
men['nb_rep'] = men['nb_rep'].astype(int)
# 3- Nouvelles pondérations (qui seront celles associées aux individus après réplication)
men['pond'] = men['pond'].div(men['nb_rep'])
# TO DO: réflechir pondération des personnes en collectivité pour l'instant = 1
men.loc[men['id'] < 10, 'pond'] = 1
men_exp = replicate(men)
# pour conserver les 10 premiers ménages = collectivités
men_exp['id'] = new_idmen(men_exp, 'id')
if foy is not None:
foy = merge(men[['id','nb_rep']], foy, left_on='id', right_on='men', how='right', suffixes=('_men',''))
foy_exp= replicate(foy)
foy_exp['men'] = new_link_with_men(foy, men_exp, 'men')
else:
foy_exp = None
if par is not None:
par = merge(men[['id','nb_rep']], par, left_on = 'id', right_on='men', how='inner', suffixes=('_men',''))
par_exp = replicate(par)
par_exp['men'] = new_link_with_men(par, men_exp, 'men')
else:
par_exp = None
ind = merge(men[['id','nb_rep']].rename(columns = {'id': 'men'}), ind, on='men', how='right', suffixes = ('_men',''))
ind_exp = replicate(ind)
# lien indiv - entités supérieures
ind_exp['men'] = new_link_with_men(ind, men_exp, 'men')
ind_exp['men'] += 10
# liens entre individus
tableB = ind_exp[['id_rep','id_ini']]
tableB['id_index'] = tableB.index
# ind_exp = ind_exp.drop(['pere', 'mere','partner'], axis=1)
print("debut travail sur identifiant")
def _align_link(link_name, table_exp):
tab = table_exp[[link_name, 'id_rep']].reset_index()
tab = tab.merge(tableB,left_on=[link_name,'id_rep'], right_on=['id_ini','id_rep'], how='inner').set_index('index')
tab = tab.drop([link_name], axis=1).rename(columns={'id_index': link_name})
table_exp[link_name][tab.index.values] = tab[link_name].values
# table_exp.merge(tab, left_index=True,right_index=True, how='left', copy=False)
return table_exp
ind_exp = _align_link('pere', ind_exp)
ind_exp = _align_link('mere', ind_exp)
ind_exp = _align_link('partner', ind_exp)
#TODO: add _align_link with 'pere' and 'mere' in child_out_ouf_house in order to swap expand
# and creation_child_out_ouf_house, in the running order
if foy is not None:
#le plus simple est de repartir des quifoy, cela change du men
# la vérité c'est que ça ne marche pas avec ind_exp['foy'] = new_link_with_men(ind, foy_exp, 'foy')
vous = (ind['quifoy'] == 0)
partner = (ind['quifoy'] == 1)
pac = (ind['quifoy'] == 2)
ind.loc[vous,'foy']= range(sum(vous))
ind.loc[partner,'foy'] = ind.ix[ind['partner'][partner],['foy']]
pac_pere = pac & notnull(ind['pere'])
ind.loc[pac_pere,'foy'] = ind.loc[ind.loc[pac_pere,'pere'],['foy']]
pac_mere = pac & ~notnull(ind['foy'])
ind.loc[pac_mere,'foy'] = ind.loc[ind.loc[pac_mere,'mere'],['foy']]
for name, table in longit.iteritems():
table = table.merge(ind_exp[['id_ini', 'id']], right_on='id', left_index=True, how='right')
table.set_index('id', inplace=True)
table.drop('id_ini', axis=1, inplace=True)
self.longitudinal[name] = table
assert sum(ind['id']==-1) == 0
self.child_out_of_house = par
self.men = men_exp
self.ind = ind_exp
self.foy = foy_exp
self.drop_variable({'men':['id_rep','nb_rep'], 'ind':['id_rep']})
def format_to_liam(self):
'''
        Here the variables are given the right codes to finish the DataTil work
        We also create the variables that are useful for the simulation
'''
men = self.men
ind = self.ind
foy = self.foy
futur = self.futur
past = self.past
longit = self.longitudinal
ind_men = ind.groupby('men')
ind = ind.set_index('men')
ind['nb_men'] = ind_men.size().astype(np.int)
ind = ind.reset_index()
ind_foy = ind.groupby('foy')
ind = ind.set_index('foy')
ind['nb_foy'] = ind_foy.size().astype(np.int)
ind = ind.reset_index()
if 'lienpref' in ind.columns :
self.drop_variable({'ind':['lienpref','anais','mnais']})
for name in ['ind', 'foy', 'men', 'futur', 'past']:
table = eval(name)
if table is not None:
vars_int, vars_float = variables_til[name]
for var in vars_int + ['id','period']:
if var not in table.columns:
table[var] = -1
table = table.fillna(-1)
table[var] = table[var].astype(np.int32)
for var in vars_float + ['pond']:
if var not in table.columns:
if var=='pond':
table[var] = 1
else:
table[var] = -1
table = table.fillna(-1)
table[var] = table[var].astype(np.float64)
table = table.sort_index(by=['period','id'])
setattr(self, name, table)
# # In case we need to Add one to each link because liam need no 0 in index
# if ind['id'].min() == 0:
# links = ['id','pere','mere','partner','foy','men','pref','vous']
# for table in [ind, men, foy, futur, past]:
# if table is not None:
# vars_link = [x for x in table.columns if x in links]
# table[vars_link] += 1
# table[vars_link].replace(0,-1, inplace=True)
def _check_links(self, ind):
if ind is None:
ind = self.ind
to_check = ind[['id', 'agem', 'sexe', 'men', 'partner', 'pere', 'mere']]
# age parent
tab = to_check.copy()
for lien in ['partner', 'pere', 'mere']:
tab = tab.merge(to_check, left_on=lien, right_on='id', suffixes=('', '_' + lien), how='left', sort=False)
tab.index = tab['id']
diff_age_pere = (tab['agem_pere'] - tab['agem'])
diff_age_mere = (tab['agem_mere'] - tab['agem'])
try:
assert diff_age_pere.min() > 12*14
assert diff_age_mere.min() > 12*12.4
# pas de probleme du partneroint
assert sum(tab['id_pere'] == tab['id_partner']) == 0
assert sum(tab['id_mere'] == tab['id_partner']) == 0
assert sum(tab['id_mere'] == tab['id_pere']) == 0
assert sum(tab['sexe_mere'] == tab['sexe_pere']) == 0
except:
pdb.set_trace()
test = diff_age_pere < 0
tab[test]
# on va plus loin sur les partneroints pour éviter les frères et soeurs :
tab_partner = tab.loc[tab['partner'] > -1].copy()
tab_partner.replace(-1, np.nan, inplace=True)
try:
assert all((tab_partner['id'] == tab_partner['partner_partner'])) # Les couples sont réciproques
assert sum(tab_partner['mere'] == tab_partner['mere_partner']) == 0 # pas de mariage entre frere et soeur
assert sum(tab_partner['pere'] == tab_partner['pere_partner']) == 0
except:
test = tab_partner['pere'] == tab_partner['pere_partner']
pdb.set_trace()
def final_check(self):
        ''' These checks are meant to verify all the conditions
        that a database must satisfy in order to run with Til '''
men = self.men
ind = self.ind
foy = self.foy
futur = self.futur
longit = self.longitudinal
assert all(ind['workstate'].isin(range(1,12)))
assert all(ind['civilstate'].isin(range(1,6)))
# Foyers et ménages bien attribués
assert sum((ind['foy'] == -1)) == 0
assert sum((ind['men'] == -1)) == 0
print "Nombre de personnes dans ménages ordinaires : ", sum(ind['men']>9)
print "Nombre de personnes vivant au sein de collectivités : ", sum(ind['men']<10)
## On vérifie qu'on a un et un seul qui = 0 et au plus un qui = 1 pour foy et men
for ent in ['men', 'foy']:
ind['qui0'] = (ind['qui' + ent] == 0).astype(int)
ind['qui1'] = (ind['qui' + ent] == 1).astype(int)
ind0 = ind[ind[ent] > 9].groupby(ent) # on exclut les collectivités
# on vérifie qu'on a un et un seul qui = 0
assert ind0['qui0'].sum().max() == 1
assert ind0['qui0'].sum().min() == 1
# on vérifie qu'on a au plus un qui = 1
assert ind0['qui1'].sum().max() == 1
# on vérifie que les noms d'identité sont bien dans la table entity et réciproquement
list_id = eval(ent)['id']
assert ind[ent].isin(list_id).all()
assert list_id.isin(ind[ent]).all()
# si on est un 2
# si on est quimen = 1 alors on a son partneroint avec soi
qui1 = ind['qui' + ent]==1
partner = ind.loc[qui1, 'partner'].values
partner_ent = ind.iloc[partner]
partner_ent = partner_ent[ent]
qui1_ent = ind.loc[qui1, ent]
assert (qui1_ent == partner_ent).all()
# Table futur bien construite
if futur is not None:
# -> On vérifie que persone ne nait pas dans le futur tout en étant présent dans les données intiales
id_ini = ind[['id']]
# 'naiss' != -1 <-> naissance
id_futur = futur.loc[(futur['naiss']!=-1) , ['id']]
id_ok = concat([id_ini, id_futur], axis = 0)
assert count_dup(id_ok,'id') == 0
assert len(futur[(futur['naiss']<= self.survey_year) & (futur['naiss']!= -1) ])== 0
if len(futur.loc[~futur['id'].isin(id_ok['id']), 'id']) != 0:
pb_id = futur.loc[~(futur['id'].isin(id_ok['id'])), :].drop_duplicates('id')
print ('Nombre identifants problématiques dans la table futur: ', len(pb_id))
print ("Nombre de personnes présentes dans la base "
+ str( len(id_ok)) + " ("+ str( len(id_ini))
+ " initialement et " + str( len(id_futur)) + " qui naissent ensuite)")
for table in [ind, men, foy, futur]:
if table is not None:
test_month = table['period'] % 100
assert all(test_month.isin(range(1, 13)))
test_year = table['period'] // 100
assert all(test_year.isin(range(1900, 2100)))
for name, table in longit.iteritems():
cols = table.columns
cols_year = [(col // 100 in range(1900, 2100)) for col in cols]
cols_month = [(col % 100 in range(1, 13)) for col in cols]
assert all(cols_year)
assert all(cols_month)
# check reciprocity:
assert all(ind.loc[ind['civilstate'].isin([1,5]), 'partner'] > -1)
rec = ind.loc[ind['partner'] != -1, ['id','partner','civilstate']]
rec = rec.merge(rec, left_on='id', right_on='partner', suffixes=('','_c'))
# 1- check reciprocity of partner
assert all(rec['partner_c'] == rec['id'])
assert all(rec.loc[rec['civilstate'].isin([1,5]), 'civilstate'] ==
rec.loc[rec['civilstate'].isin([1,5]), 'civilstate_c'])
self._check_links(ind)
def _output_name(self, extension='.h5'):
if self.seuil is None:
name = self.name + extension
else:
name = self.name + '_' + str(self.seuil) + extension
return os.path.join(path_model, name)
def store_to_liam(self):
'''
        Save the data in the format used afterwards by the Til model
        Selects the variables that Til will use downstream
        Calls Liam2 functions
'''
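        # Resulting HDF5 layout (as written below): one table per entity under
        # /entities/<name>, plus the derived 'companies' and 'register' tables, and
        # one pandas object per longitudinal variable under /longitudinal/<varname>.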
path = self._output_name()
h5file = tables.openFile(path, mode="w")
ent_node = h5file.createGroup("/", "entities", "Entities")
for ent_name in ['ind','foy','men','futur','past']:
entity = eval('self.'+ ent_name)
if entity is not None:
entity = entity.fillna(-1)
try:
ent_table = entity.to_records(index=False)
except:
pdb.set_trace()
dtypes = ent_table.dtype
final_name = of_name_to_til[ent_name]
try:
table = h5file.createTable(ent_node, final_name, dtypes, title="%s table" % final_name)
table.append(ent_table)
except:
pdb.set_trace()
table.flush()
if ent_name == 'men':
entity = entity.loc[entity['id']>-1]
ent_table2 = entity[['pond','id','period']].to_records(index=False)
dtypes2 = ent_table2.dtype
table = h5file.createTable(ent_node, 'companies', dtypes2, title="'companies table")
table.append(ent_table2)
table.flush()
if ent_name == 'ind':
ent_table2 = entity[['agem','sexe','pere','mere','id','findet','period']].to_records(index=False)
dtypes2 = ent_table2.dtype
table = h5file.createTable(ent_node, 'register', dtypes2, title="register table")
table.append(ent_table2)
table.flush()
h5file.close()
# 3 - table longitudinal
# Note: on conserve le format pandas ici
store = HDFStore(path)
for varname, table in self.longitudinal.iteritems():
table['id'] = table.index
store.append('longitudinal/' + varname, table)
store.close()
def store(self):
path = self._output_name()
self.men.to_hdf(path, 'entites/men')
self.ind.to_hdf(path, 'entites/ind')
self.foy.to_hdf(path, 'entites/foy')
def run_all(self):
for method in self.methods_order:
eval('self.'+ method + '()')
| gpl-3.0 |
bkomboz/pymltk | pymltk/exploration.py | 1 | 6123 | # imports
import numpy as np
import pandas as pd
import dask.dataframe as dd
import matplotlib.pyplot as plt
from . import utils as ut  # imported as `ut`, matching the ut._compute() calls below
# functions
def summarize(data=None, features=None, size=5,
digits=3, as_df=False, verbose=True, **kwargs):
"""
Summarize features of a given pandas/dask dataframe.
    Given either a pandas or a dask dataframe, summarize will create a
    descriptive summary of the specified features and print this summary
    (or, if requested, return it as a dataframe).
Args:
data: A pandas or dask dataframe object.
features: String, list of strings, integers or booleans.
The features (or indices of those) to summarize.
size: Int, number of samples to draw from each feature.
digits: Int, number of digits numeric values should be rounded to.
as_df: Boolean, whether to return the summary as a dataframe
instead of printing it (the default).
verbose: Boolean, currently not used.
**kwargs: Currently not used.
Returns:
Either a printed summary or a dataframe version of it.
Raises:
ValueError: If no data is specified. If features are not specified
as string, list of strings, integers or booleans. If the specified
feature names are not valid column names or the integers or
booleans don't map to valid columns of the specified data.
If size and digits are not specified as integers.
"""
# check input
if data is None or (not isinstance(data, pd.core.frame.DataFrame)
and not isinstance(data, dd.core.DataFrame)):
raise ValueError('Data must be specified as \
pandas.core.frame.DataFrame or dask.core.DataFrame.')
features_list = data.columns.tolist()
    if features is not None and (not isinstance(features, str) and
                                 not isinstance(features, list)):
raise ValueError('Features must be specified as string or\
list of strings, integers or booleans.')
if isinstance(features, list):
if ((isinstance(features[0], str) and
len(set(features) - set(features_list))) or
(isinstance(features[0], int) and
             (max(features) > len(features_list) or min(features) < 0)) or
(isinstance(features[0], bool) and
len(features) != len(features_list))):
raise ValueError('Not all of the specified features\
map to a valid feature name.')
if not isinstance(size, int):
raise ValueError('Size has to be specified as integer.')
if not isinstance(digits, int):
raise ValueError('Digits has to be specified as integer.')
# setup data and features to summarize
if features is not None:
if isinstance(features, str):
features_list = [features]
elif isinstance(features[0], str):
features_list = features
elif isinstance(features[0], bool):
features_list = [name for index, name in enumerate(
features_list) if features[index]]
else:
features_list = [name for index, name in enumerate(
features_list) if index in features]
subdata = data[features_list]
# general summary statistics
dtypes = subdata.dtypes
nan_counts = ut._compute(subdata.isnull().sum())
total_counts = ut._compute(subdata.count()) + nan_counts
percents_nan = nan_counts/total_counts
sample = {feature: np.random.choice(subdata[feature], size, replace=False)
for feature in features_list}
# object/string/category summary statistics
string_features_list = dtypes[
np.logical_or(dtypes == 'category', dtypes == 'object')].index.tolist()
if string_features_list:
nuniques = {feature: ut._compute(data[feature].nunique())
for feature in string_features_list}
percent_uniques = pd.Series(nuniques)/total_counts[
string_features_list]
modes = {feature: ut.mode(data[feature])
for feature in string_features_list}
# integer/numeric summary statistics
numeric_features_list = dtypes[
np.logical_or(dtypes == 'int64', dtypes == 'float64')].index.tolist()
if numeric_features_list:
quantiles = ut._compute(data[numeric_features_list].dropna().quantile(
[0, 0.25, 0.5, 0.75, 1]))
means = ut._compute(data[numeric_features_list].mean())
vars = ut._compute(data[numeric_features_list].var())
#mads = {feature: np.mean(np.abs(data[feature] -
# quantiles.loc[0.5, feature]))
# for feature in numeric_features_list}
#skewness = {feature: np.mean(np.power((
# data[feature] - means[feature]), 3))/(np.power(
# total_counts[feature]/(total_counts[feature] - 1) * np.mean(
# np.power((data[feature] - means[feature]), 2)), 3/2))
# for feature in numeric_features_list}
# merge together into dataframe
cols = [dtypes, nan_counts, np.round(percents_nan, digits)]
names = ['name', 'dtype', '#nan', '%nan']
if string_features_list:
cols.extend([pd.Series(nuniques), pd.Series(percent_uniques),
pd.Series(modes)])
names.extend(['#uniq', '%uniq', 'mode'])
if numeric_features_list:
cols.extend([np.round(np.transpose(quantiles), digits),
np.round(means, digits), np.round(vars, digits)])
#np.round(pd.Series(mads), digits)])
# np.round(pd.Series(skewness), digits)])
names.extend(['min', 'Q25', 'med', 'Q75', 'max','mean', 'var'])
cols.append(pd.Series(sample))
names.append('sample')
summary = pd.concat(cols, axis=1)
summary = summary.loc[features_list, :]
summary = summary.reset_index()
summary.columns = names
# return or print
if as_df:
return summary
else:
print(summary)
| apache-2.0 |
kernc/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 22 | 25505 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
lcharleux/numerical_analysis | doc/Optimisation/Example_code/brachi2d.py | 1 | 1803 | #------------------------------------------------------------------------
# SEARCH FOR THE FASTEST PATH BETWEEN TWO POINTS A AND B
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# PACKAGES
from scipy import optimize as opt # Optimize
import numpy as np # Numpy
import matplotlib.pyplot as plt # Pyplot
from matplotlib import cm # Colormaps
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# POSITION OF THE POINTS AND PHYSICAL DATA
xa, xb = 0., 1.
ya, yb = 1., 0.
m = 1. # mass in kg
g = 10. # gravity in m.s**-2
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# TRAVEL TIME COMPUTATION
def temps(Y):
    # Compute the potential energy, assuming it is zero at A
    Ep = m * g * (Y - Y[0])
    # Compute the kinetic energy
    Ec = - Ep
    # Compute the speed
    V = (2. / m * Ec) **.5
    # Compute the average speed on each element
    Ve = (V[1:] + V[:-1]) / 2.
    # Compute the step in X:
    dx = X[1] - X[0]
    # Compute the length of each element
    Le = ( ( Y[1:] - Y[:-1] )**2 + dx**2)**.5
    # Compute the travel time on each element
    te = Le / Ve
    # Compute the total travel time
t = te.sum()
return t
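# Recap of the discretization above (a sketch, using the same symbols):
# energy conservation gives v(y) = sqrt(2*g*(y_A - y)), and the travel time is
# approximated by t = sum_e L_e / V_e, where L_e = sqrt(dx**2 + dy_e**2) is the
# length of element e and V_e is the average of v at its two end points.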
def add_AB(Yc):
Y = np.zeros([len(Yc) + 2])
Y[1:-1] = Yc
Y[0], Y[-1] = ya, yb
return Y
def temps2(Yc):
return temps(add_AB(Yc))
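# Hedged usage sketch (the initial guess and optimizer choice are assumptions,
# not taken from this file): assuming X has been defined as the horizontal grid
# (it is used as a global inside temps), the interior ordinates could be
# optimized with, e.g.:
#   Yc0 = np.linspace(ya, yb, len(X))[1:-1]
#   Yc_opt = opt.fmin(temps2, Yc0)
# temps2 adds the fixed end points A and B back before evaluating the time.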
#------------------------------------------------------------------------
def func(Yc):
Y = add_AB(Yc)
return temps(Y)
def brute_force(func, limits, samples):
| gpl-2.0 |
appapantula/scikit-learn | sklearn/preprocessing/tests/test_data.py | 113 | 38432 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
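# Illustrative sketch, not part of the original test module: the assertions
# above depend on the column layout of the legacy integer-coded OneHotEncoder
# used in this file. Each input column i owns the block of output columns
# starting at feature_indices_[i], and active_features_ keeps only the columns
# whose value was actually observed during fit (here 5 of 4 + 3 + 2 = 9).
def _demo_one_hot_layout():
    enc = OneHotEncoder()
    X_demo = [[3, 2, 1], [0, 1, 1]]
    X_trans = enc.fit_transform(X_demo).toarray()
    print(enc.feature_indices_)    # block boundaries, e.g. [0 4 7 9]
    print(enc.active_features_)    # observed columns, e.g. [0 3 5 6 8]
    print(X_trans.shape)           # (2, 5)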
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
smarden1/airflow | setup.py | 2 | 1834 | from setuptools import setup, find_packages
# Kept manually in sync with airflow.__version__
version = '1.2.0'
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'pyhs2>=0.6.0',
]
mysql = ['mysql-python>=1.2.5']
postgres = ['psycopg2>=2.6']
optional = ['librabbitmq>=1.6.1']
samba = ['pysmbclient>=0.1.3']
s3 = ['boto>=2.36.0']
all_dbs = postgres + mysql + hive
devel = all_dbs + doc + samba + s3 + ['nose']
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
version=version,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'celery>=3.1.17',
'chartkick>=0.4.2',
'dill>=0.2.2',
'flask>=0.10.1',
'flask-admin>=1.0.9',
'flask-cache>=0.13.1',
'flask-login>=0.2.11',
'flower>=0.7.3',
'jinja2>=2.7.3',
'markdown>=2.5.2',
'pandas>=0.15.2',
'pygments>=2.0.1',
'python-dateutil>=2.3',
'requests>=2.5.1',
'setproctitle>=1.1.8',
'snakebite>=2.4.13',
'sqlalchemy>=0.9.8',
'statsd>=3.0.1',
'thrift>=0.9.2',
'tornado>=4.0.2',
],
extras_require={
'all': devel + optional,
'all_dbs': all_dbs,
'doc': doc,
'devel': devel,
'hive': hive,
'mysql': mysql,
'postgres': postgres,
's3': s3,
'samba': samba,
},
author='Maxime Beauchemin',
author_email='[email protected]',
url='https://github.com/airbnb/airflow',
download_url=(
'https://github.com/airbnb/airflow/tarball/' + version),
)
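# Usage sketch (comment only, not executed by setup.py): the extras_require
# mapping above lets users opt into dependency groups at install time, e.g.
#
#   pip install airflow              # core dependencies only
#   pip install airflow[mysql,s3]    # core plus the MySQL and S3 extras
#   pip install airflow[devel]       # everything needed for development
#
# The package name on the index is assumed here; the extras names simply map
# to the lists (hive, mysql, postgres, s3, samba, doc, devel, ...) defined above.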
| apache-2.0 |
voxlol/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
keflavich/scikit-image | doc/examples/plot_swirl.py | 18 | 2581 | """
=====
Swirl
=====
Image swirling is a non-linear image deformation that creates a whirlpool
effect. This example describes the implementation of this transform in
``skimage``, as well as the underlying warp mechanism.
Image warping
-------------
When applying a geometric transformation on an image, we typically make use of
a reverse mapping, i.e., for each pixel in the output image, we compute its
corresponding position in the input. The reason is that, if we were to do it
the other way around (map each input pixel to its new output position), some
pixels in the output may be left empty. On the other hand, each output
coordinate has exactly one corresponding location in (or outside) the input
image, and even if that position is non-integer, we may use interpolation to
compute the corresponding image value.
Performing a reverse mapping
----------------------------
To perform a geometric warp in ``skimage``, you simply need to provide the
reverse mapping to the ``skimage.transform.warp`` function. E.g., consider the
case where we would like to shift an image 50 pixels to the left. The reverse
mapping for such a shift would be::
def shift_left(xy):
xy[:, 0] += 50
return xy
The corresponding call to warp is::
from skimage.transform import warp
warp(image, shift_left)
The swirl transformation
------------------------
Consider the coordinate :math:`(x, y)` in the output image. The reverse
mapping for the swirl transformation first computes, relative to a center
:math:`(x_0, y_0)`, its polar coordinates,
.. math::
\\theta = \\arctan(y/x)
\\rho = \sqrt{(x - x_0)^2 + (y - y_0)^2},
and then transforms them according to
.. math::
r = \ln(2) \, \mathtt{radius} / 5
\phi = \mathtt{rotation}
s = \mathtt{strength}
\\theta' = \phi + s \, e^{-\\rho / r} + \\theta
where ``strength`` is a parameter for the amount of swirl, ``radius`` indicates
the swirl extent in pixels, and ``rotation`` adds a rotation angle. The
transformation of ``radius`` into :math:`r` is to ensure that the
transformation decays to :math:`\\approx 1/1000^{\mathsf{th}}` within the
specified radius.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.transform import swirl
image = data.checkerboard()
swirled = swirl(image, rotation=0, strength=10, radius=120, order=2)
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 3))
ax0.imshow(image, cmap=plt.cm.gray, interpolation='none')
ax0.axis('off')
ax1.imshow(swirled, cmap=plt.cm.gray, interpolation='none')
ax1.axis('off')
plt.show()
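# A minimal, hedged sketch of the reverse-mapping idea described in the
# docstring above: ``warp`` hands the callable an array of output (col, row)
# coordinates and expects the corresponding input coordinates in return.
# Shifting the image 50 pixels to the left is the simplest such mapping; it
# reuses the checkerboard image already loaded in this example.
from skimage.transform import warp

def shift_left(xy):
    xy[:, 0] += 50
    return xy

shifted = warp(image, shift_left)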
| bsd-3-clause |
mugizico/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
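# The property below raises AttributeError when the wrapped method is the one
# being "hidden", so hasattr() on the sub-estimator reports False and the
# delegating meta-estimator is expected to drop that method as well.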
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
GoogleCloudPlatform/professional-services | tools/bigquery_user_info_updater_tool/bigquery_user_info_updater/tests/test_nested_user_info_updater.py | 4 | 9590 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pandas as pd
from google.api_core import exceptions
from google.cloud import bigquery
from bigquery_user_info_updater.updater_tools import query_creator
from bigquery_user_info_updater.updater_tools import user_info_updater
from bigquery_user_info_updater.updater_tools import user_schema
class TestNestedUserInfoUpdater(object):
"""Tests functionality of scripts.UserInfoUpdater.
Attributes:
bq_client(google.cloud.bigquery.client.Client): Client to hold
configurations needed for BigQuery API requests.
dataset_id(str): ID of the dataset that holds the test table.
dataset_ref(google.cloud.bigquery.dataset.DatasetReference): Pointer
to the dataset that holds the test table.
dataset(google.cloud.bigquery.dataset.Dataset): Dataset that holds the
test table.
user_info_updates_id(str): The ID of the historical table that holds
all rows and updates for all users.
temp_user_info_updates_id(str): ID of the intermediary temp table.
user_info_final_id(str): The ID of the table that holds one up-to-date row
per user.
"""
def setup(self):
"""Sets up resources for tests.
"""
self.bq_client = bigquery.Client()
self.dataset_id = 'user_updater_test'
self.dataset_ref = self.bq_client.dataset(self.dataset_id)
try:
self.dataset = self.bq_client.get_dataset(self.dataset_ref)
except exceptions.NotFound:
dataset = bigquery.Dataset(self.dataset_ref)
self.dataset = self.bq_client.create_dataset(dataset)
schema_path = 'test_schemas/test_nested_schema.json'
abs_path = os.path.abspath(os.path.dirname(__file__))
self.schema_path = os.path.join(abs_path, schema_path)
schema = user_schema.UserSchema(self.schema_path)
self.bq_schema = schema.translate_json_schema()
self.user_info_updates_id = 'test_nested_user_info_updates'
self.user_info_updates_table = self.create_table(
self.user_info_updates_id)
self.temp_user_info_updates_id = 'test_nested_temp_user_info_updates'
self.temp_user_info_updates_table = self.create_table(
self.temp_user_info_updates_id)
self.user_info_final_id = 'test_nested_user_info_final'
self.user_info_final_table = self.create_table(self.user_info_final_id)
def create_table(self, table_id):
"""Creates test user tables.
Args:
table_id(str): ID of the user table to be created.
Returns:
The created table (google.cloud.bigquery.table.Table).
"""
table_ref = self.dataset_ref.table(table_id)
table = bigquery.Table(table_ref, schema=self.bq_schema)
try:
self.bq_client.delete_table(table)
return self.bq_client.create_table(table)
except exceptions.NotFound:
return self.bq_client.create_table(table)
def load_json_to_bq(self, filename, table):
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
abs_path = os.path.abspath(os.path.dirname(__file__))
data_file = os.path.join(abs_path, filename)
with open(data_file, 'rb') as file_obj:
load_job = self.bq_client.load_table_from_file(
file_obj=file_obj, destination=table, job_config=job_config)
return load_job.result()
def test_nested_data_user_update(self, project_id):
"""Tests UserInfoUpdater ability to run an update on nested user data.
Args:
project_id(str): ID of the project that holds the test BQ tables.
Returns:
True if test passes, else False.
"""
if not project_id:
raise Exception(
'Test needs project_id to pass. '
'Add --project_id={your project ID} to test command')
# Load a set of user updates to nested user_info_updates table.
self.load_json_to_bq(
filename='test_data/nested_data/user_info_updates_data.json',
table=self.dataset_ref.table(self.user_info_updates_id))
# Load data into the temp table and final table to simulate a previous
# run.
self.load_json_to_bq(filename='test_data/nested_data/'
'temp_user_info_updates_initial.json',
table=self.dataset_ref.table(
self.temp_user_info_updates_id))
self.load_json_to_bq(
filename='test_data/nested_data/user_info_final_initial.json',
table=self.dataset_ref.table(self.user_info_final_id))
# Run the UserInfoUpdater on the second set of updates.
test_updater = user_info_updater.UserInfoUpdater(
project_id, self.dataset_id, self.user_info_updates_id,
self.temp_user_info_updates_id, self.user_info_final_id)
update_query_creator = query_creator.QueryCreator(
schema_path=self.schema_path,
user_id_field_name='userId',
ingest_timestamp_field_name='ingestTimestamp',
project_id=project_id,
dataset_id=self.dataset_id,
updates_table_id=self.user_info_updates_id,
temp_updates_table_id=self.temp_user_info_updates_id,
final_table_id=self.user_info_final_id)
gather_updates_query = update_query_creator.create_gather_updates_query(
)
test_updater.gather_updates(gather_updates_query)
merge_updates_query = update_query_creator.create_merge_query()
test_updater.merge_updates(merge_updates_query)
# Query the temp table to test that the gather_updates() function worked
temp_table_query_config = bigquery.QueryJobConfig()
temp_table_query_config.use_legacy_sql = False
temp_table_query = self.bq_client.query(
query='SELECT * FROM `{0:s}.{1:s}.{2:s}`'.format(
project_id, self.dataset_id, self.temp_user_info_updates_id),
job_config=temp_table_query_config,
location='US')
temp_table_query.result()
temp_table_results_df = temp_table_query.to_dataframe() \
.sort_values(by=['userId']).reset_index(drop=True)
# Gather expected results for comparison
abs_path = os.path.abspath(os.path.dirname(__file__))
expected_temp_data_file = os.path.join(
abs_path,
'test_data/nested_data/temp_user_info_updates_expected.json')
expected_temp_table_df = pd.read_json(expected_temp_data_file)
# Reorder columns since read_json() reads them alphabetically
with open(self.schema_path, 'r') as f:
json_schema = json.loads(f.read())
col_list = [str(col['name']) for col in json_schema['fields']]
expected_temp_table_df = expected_temp_table_df[col_list]
# convert ingestTimestamp to datetime
expected_temp_table_df['ingestTimestamp'] = pd.to_datetime(
expected_temp_table_df['ingestTimestamp'])
# Compare results
pd.testing.assert_frame_equal(temp_table_results_df,
expected_temp_table_df)
# Query the final table to test that the merge_updates() function worked
final_table_query_config = bigquery.QueryJobConfig()
final_table_query_config.use_legacy_sql = False
final_table_query = self.bq_client.query(
query='SELECT * FROM `{0:s}.{1:s}.{2:s}`'.format(
project_id, self.dataset_id, self.user_info_final_id),
job_config=final_table_query_config,
location='US')
final_table_query.result()
final_table_results_df = final_table_query.to_dataframe() \
.sort_values(by=['userId']).reset_index(drop=True)
# Gather expected results for comparison
expected_final_data_file = os.path.join(
abs_path, 'test_data/nested_data/user_info_final_expected.json')
expected_final_table_df = pd.read_json(expected_final_data_file)
# Reorder columns since read_json() reads them alphabetically
with open(self.schema_path, 'r') as f:
json_schema = json.loads(f.read())
col_list = [str(col['name']) for col in json_schema['fields']]
expected_final_table_df = expected_final_table_df[col_list]
# convert ingestTimestamp to datetime
expected_final_table_df['ingestTimestamp'] = pd.to_datetime(
expected_final_table_df['ingestTimestamp'])
# Compare results
pd.testing.assert_frame_equal(final_table_results_df,
expected_final_table_df)
def teardown(self):
"""Deletes any resources used by tests.
"""
self.bq_client.delete_dataset(self.dataset_ref, delete_contents=True)
| apache-2.0 |
procoder317/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
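# Quick usage sketch (illustrative comment only; nothing here runs on import):
#
#   import numpy as np
#   from sklearn.preprocessing import StandardScaler, Binarizer
#   X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
#   StandardScaler().fit_transform(X)      # each column to zero mean, unit variance
#   Binarizer(threshold=0.5).transform(X)  # entries above 0.5 become 1, the rest 0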
| bsd-3-clause |
gdementen/PyTables | doc/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException as e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
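# Usage sketch (hedged; mirrors the module docstring above): once this
# extension is enabled in a Sphinx project's conf.py, a document can embed a
# diagram with
#
#   .. inheritance-diagram:: mymodule.DerivedClass othermodule
#      :parts: 1
#
# ``mymodule``/``othermodule`` are placeholder names. ``parts`` limits how many
# dotted name components are shown per node (0, the default, keeps full paths).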
| bsd-3-clause |
poryfly/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components of that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
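# Hedged mini-example of the ItemSelector defined above: it simply indexes a
# mappable by key, which is how the subject and body sub-pipelines each pull
# their own field out of the record array built by SubjectBodyExtractor.
_demo = {'a': [1, 5, 2, 5, 2, 8],
         'b': [9, 4, 1, 4, 1, 3]}
assert ItemSelector(key='a').fit(_demo).transform(_demo) == _demo['a']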
| bsd-3-clause |
mayblue9/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
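# Hedged illustration rather than a test: the pattern used throughout this file
# is to corrupt a clean linear relation with a few outliers and check that
# Theil-Sen recovers the true slope while ordinary least squares drifts.
def _demo_theil_sen_vs_ols():
    X, y, w, c = gen_toy_problem_1d()
    ols = LinearRegression().fit(X, y)
    robust = TheilSenRegressor(random_state=0).fit(X, y)
    print("true slope: %r, OLS: %r, Theil-Sen: %r"
          % (w, ols.coef_[0], robust.coef_[0]))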
| bsd-3-clause |
dhruv13J/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
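# Minimal, hedged sketch of the core technique, independent of the species
# data: fit a KernelDensity with the haversine metric (which requires the
# ball_tree algorithm) on a few made-up (latitude, longitude) points in
# radians, then evaluate the log-density at a query point.
_pts = np.array([[-10.0, -60.0], [-10.5, -61.0], [-9.5, -59.5]]) * np.pi / 180.
_kde = KernelDensity(bandwidth=0.04, metric='haversine',
                     kernel='gaussian', algorithm='ball_tree')
_kde.fit(_pts)
_log_dens = _kde.score_samples(np.array([[-10.2, -60.3]]) * np.pi / 180.)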
| bsd-3-clause |
duyet-website/api.duyet.net | lib/numpy/lib/twodim_base.py | 34 | 25580 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
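# Note: the flat-index trick above places the first one at flat position k
# (for k >= 0) and then steps by M+1, which moves one row down and one
# column right, tracing the k-th diagonal; the m[:M-k] slice keeps the
# diagonal from wrapping into later rows.  For example, eye(3, 4, k=1)
# writes ones at flat positions 1, 6 and 11, i.e. elements (0, 1), (1, 2)
# and (2, 3).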
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
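# Note: the mask above is an outer >= comparison between the row indices
# arange(N) and the shifted column indices arange(-k, M-k), so entry (i, j)
# is True exactly when i >= j - k, i.e. j <= i + k.  For tri(3, 5, 2) the
# shifted column indices are [-2, -1, 0, 1, 2], which reproduces the
# docstring example row by row.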
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
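# Note: zeros(1, m.dtype) broadcasts against m inside where(), so masked-out
# entries are replaced by a zero of the right dtype without allocating a
# full zero array; because where() builds a new array, tril and triu return
# copies rather than views, as their docstrings state.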
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
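# Note: the powers are built without calling **.  Column 0 of the working
# view is set to 1, the remaining columns are filled with x, and
# multiply.accumulate along axis 1 turns them into x, x**2, x**3, ...;
# when increasing is False the work happens through the reversed view
# v[:, ::-1], so the highest power lands in the first column.  For
# x = [1, 2, 3, 5] and N = 3 the accumulated columns are [x, x**2], giving
# the [x**2, x, 1] layout shown in the Examples section.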
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
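# Note: the try/except above only classifies how `bins` was given -- a
# scalar has no len() (N == 1) and a two-element specification (N == 2) is
# passed through unchanged, while anything longer is treated as a single
# array of edges shared by both axes.  The actual binning is delegated to
# histogramdd on the stacked [x, y] sample.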
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
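# Note: mask_indices simply applies mask_func to an all-ones (n, n) array
# and returns the coordinates of the surviving non-zeros, in row-major
# order.  For mask_indices(3, np.triu) that is
# (array([0, 0, 0, 1, 1, 2]), array([0, 1, 2, 1, 2, 2])).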
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
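# Note: tri(n, m, k=k-1) marks everything strictly below the k-th diagonal,
# so its complement ~tri(...) marks j >= i + k, exactly the requested upper
# triangle; where() then turns that boolean mask into index arrays,
# mirroring tril_indices above.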
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
draperjames/bokeh | tests/compat/polycollection.py | 13 | 1311 | from matplotlib.collections import PolyCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Generate data. In this case, we'll make a bunch of center-points and generate
# vertices by subtracting random offsets from those center-points
numpoly, numverts = 100, 4
centers = 100 * (np.random.random((numpoly, 2)) - 0.5)
offsets = 10 * (np.random.random((numverts, numpoly, 2)) - 0.5)
verts = centers + offsets
verts = np.swapaxes(verts, 0, 1)
# In your case, "verts" might be something like:
# verts = zip(zip(lon1, lat1), zip(lon2, lat2), ...)
# If "data" in your case is a numpy array, there are cleaner ways to reorder
# things to suit.
facecolors = ['red', 'green', 'blue', 'cyan', 'yellow', 'magenta', 'black']
edgecolors = ['cyan', 'yellow', 'magenta', 'black', 'red', 'green', 'blue']
widths = [5, 10, 20, 10, 5]
# Make the collection and add it to the plot.
col = PolyCollection(verts, facecolor=facecolors, edgecolor=edgecolors,
linewidth=widths, linestyle='--', alpha=0.5)
ax = plt.axes()
ax.add_collection(col)
plt.xlim([-60, 60])
plt.ylim([-60, 60])
plt.title("MPL-PolyCollection support in Bokeh")
output_file("polycollection.html", title="polycollection.py example")
show(mpl.to_bokeh())
| bsd-3-clause |
cwhanse/pvlib-python | pvlib/tests/test_soiling.py | 2 | 8092 | """Test losses"""
import datetime
import numpy as np
import pandas as pd
from .conftest import assert_series_equal
from pvlib.soiling import hsu, kimber
from pvlib.iotools import read_tmy3
from .conftest import DATA_DIR
import pytest
@pytest.fixture
def expected_output():
# Sample output (calculated manually)
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
expected_no_cleaning = pd.Series(
data=[0.96998483, 0.94623958, 0.92468139, 0.90465654, 0.88589707,
0.86826366, 0.85167258, 0.83606715, 0.82140458, 0.80764919,
0.79476875, 0.78273241, 0.77150951, 0.76106905, 0.75137932,
0.74240789, 0.73412165, 0.72648695, 0.71946981, 0.7130361,
0.70715176, 0.70178307, 0.69689677, 0.69246034],
index=dt)
return expected_no_cleaning
@pytest.fixture
def expected_output_1():
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
expected_output_1 = pd.Series(
data=[0.98484972, 0.97277367, 0.96167471, 0.95119603, 1.,
0.98484972, 0.97277367, 0.96167471, 1., 1.,
0.98484972, 0.97277367, 0.96167471, 0.95119603, 0.94118234,
0.93154854, 0.922242, 0.91322759, 0.90448058, 0.89598283,
0.88772062, 0.87968325, 0.8718622, 0.86425049],
index=dt)
return expected_output_1
@pytest.fixture
def expected_output_2():
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
expected_output_2 = pd.Series(
data=[0.95036261, 0.91178179, 0.87774818, 0.84732079, 1.,
1., 1., 0.95036261, 1., 1.,
1., 1., 0.95036261, 0.91178179, 0.87774818,
0.84732079, 0.8201171, 1., 1., 1.,
1., 0.95036261, 0.91178179, 0.87774818],
index=dt)
return expected_output_2
@pytest.fixture
def expected_output_3():
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
timedelta = [0, 0, 0, 0, 0, 30, 0, 30, 0, 30, 0, -30,
-30, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
dt_new = dt + pd.to_timedelta(timedelta, 'm')
expected_output_3 = pd.Series(
data=[0.96576705, 0.9387675, 0.91437615, 0.89186852, 1.,
1., 0.98093819, 0.9387675, 1., 1.,
1., 1., 0.96576705, 0.9387675, 0.90291005,
0.88122293, 0.86104089, 1., 1., 1.,
0.96576705, 0.9387675, 0.91437615, 0.89186852],
index=dt_new)
return expected_output_3
@pytest.fixture
def rainfall_input():
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
rainfall = pd.Series(
data=[0., 0., 0., 0., 1., 0., 0., 0., 0.5, 0.5, 0., 0., 0., 0., 0.,
0., 0.3, 0.3, 0.3, 0.3, 0., 0., 0., 0.], index=dt)
return rainfall
def test_hsu_no_cleaning(rainfall_input, expected_output):
"""Test Soiling HSU function"""
rainfall = rainfall_input
pm2_5 = 1.0
pm10 = 2.0
depo_veloc = {'2_5': 1.0e-5, '10': 1.0e-4}
tilt = 0.
expected_no_cleaning = expected_output
result = hsu(rainfall=rainfall, cleaning_threshold=10., tilt=tilt,
pm2_5=pm2_5, pm10=pm10, depo_veloc=depo_veloc,
rain_accum_period=pd.Timedelta('1h'))
assert_series_equal(result, expected_no_cleaning)
def test_hsu(rainfall_input, expected_output_2):
"""Test Soiling HSU function with cleanings"""
rainfall = rainfall_input
pm2_5 = 1.0
pm10 = 2.0
depo_veloc = {'2_5': 1.0e-4, '10': 1.0e-4}
tilt = 0.
# three cleaning events at 4:00-6:00, 8:00-11:00, and 17:00-20:00
result = hsu(rainfall=rainfall, cleaning_threshold=0.5, tilt=tilt,
pm2_5=pm2_5, pm10=pm10, depo_veloc=depo_veloc,
rain_accum_period=pd.Timedelta('3h'))
assert_series_equal(result, expected_output_2)
def test_hsu_defaults(rainfall_input, expected_output_1):
"""
Test Soiling HSU function with default deposition velocity and default rain
accumulation period.
"""
result = hsu(rainfall=rainfall_input, cleaning_threshold=0.5, tilt=0.0,
pm2_5=1.0e-2, pm10=2.0e-2)
assert np.allclose(result.values, expected_output_1)
def test_hsu_variable_time_intervals(rainfall_input, expected_output_3):
"""
Test Soiling HSU function with variable time intervals.
"""
depo_veloc = {'2_5': 1.0e-4, '10': 1.0e-4}
rain = pd.DataFrame(data=rainfall_input)
# define time deltas in minutes
timedelta = [0, 0, 0, 0, 0, 30, 0, 30, 0, 30, 0, -30,
-30, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
rain['mins_added'] = pd.to_timedelta(timedelta, 'm')
rain['new_time'] = rain.index + rain['mins_added']
rain_var_times = rain.set_index('new_time').iloc[:, 0]
result = hsu(
rainfall=rain_var_times, cleaning_threshold=0.5, tilt=50.0,
pm2_5=1, pm10=2, depo_veloc=depo_veloc,
rain_accum_period=pd.Timedelta('2h'))
assert np.allclose(result, expected_output_3)
@pytest.fixture
def greensboro_rain():
# get TMY3 data with rain
greensboro, _ = read_tmy3(DATA_DIR / '723170TYA.CSV', coerce_year=1990)
return greensboro.Lprecipdepth
@pytest.fixture
def expected_kimber_nowash():
return pd.read_csv(
DATA_DIR / 'greensboro_kimber_soil_nowash.dat',
parse_dates=True, index_col='timestamp')
def test_kimber_nowash(greensboro_rain, expected_kimber_nowash):
"""Test Kimber soiling model with no manual washes"""
# Greensboro typical expected annual rainfall is 8345mm
assert greensboro_rain.sum() == 8345
# calculate soiling with no wash dates
nowash = kimber(greensboro_rain)
# test no washes
assert np.allclose(nowash.values, expected_kimber_nowash['soiling'].values)
@pytest.fixture
def expected_kimber_manwash():
return pd.read_csv(
DATA_DIR / 'greensboro_kimber_soil_manwash.dat',
parse_dates=True, index_col='timestamp')
def test_kimber_manwash(greensboro_rain, expected_kimber_manwash):
"""Test Kimber soiling model with a manual wash"""
# a manual wash date
manwash = [datetime.date(1990, 2, 15), ]
# calculate soiling with manual wash
manwash = kimber(greensboro_rain, manual_wash_dates=manwash)
# test manual wash
assert np.allclose(
manwash.values,
expected_kimber_manwash['soiling'].values)
@pytest.fixture
def expected_kimber_norain():
# expected soiling reaches maximum
soiling_loss_rate = 0.0015
max_loss_rate = 0.3
norain = np.ones(8760) * soiling_loss_rate/24
norain[0] = 0.0
norain = np.cumsum(norain)
return np.where(norain > max_loss_rate, max_loss_rate, norain)
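# Note on the fixture above: with no rain the expected Kimber losses accrue
# at a constant 0.0015 / 24 per hour, so the cumulative sum reaches the 0.3
# cap after 0.3 / 0.0015 = 200 days (4800 hours) and stays flat for the
# rest of the 8760-hour year.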
def test_kimber_norain(greensboro_rain, expected_kimber_norain):
"""Test Kimber soiling model with no rain"""
# a year with no rain
norain = pd.Series(0, index=greensboro_rain.index)
# calculate soiling with no rain
norain = kimber(norain)
# test no rain, soiling reaches maximum
assert np.allclose(norain.values, expected_kimber_norain)
@pytest.fixture
def expected_kimber_initial_soil():
# expected soiling reaches maximum
soiling_loss_rate = 0.0015
max_loss_rate = 0.3
norain = np.ones(8760) * soiling_loss_rate/24
norain[0] = 0.1
norain = np.cumsum(norain)
return np.where(norain > max_loss_rate, max_loss_rate, norain)
def test_kimber_initial_soil(greensboro_rain, expected_kimber_initial_soil):
"""Test Kimber soiling model with initial soiling"""
# a year with no rain
norain = pd.Series(0, index=greensboro_rain.index)
# calculate soiling with no rain
norain = kimber(norain, initial_soiling=0.1)
# test no rain, soiling reaches maximum
assert np.allclose(norain.values, expected_kimber_initial_soil)
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
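        # Note: the ratio above compares the per-component variance of
        # U * Sigma with the total feature variance of X, computed via
        # mean_variance_axis so sparse inputs never need to be densified.
        # Because the denominator is the variance of the original data, the
        # ratios do not sum to 1 unless every component is retained.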
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
grundgruen/zipline | tests/finance/test_slippage.py | 32 | 18400 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for finance.slippage
"""
import datetime
import pytz
from unittest import TestCase
from nose_parameterized import parameterized
import pandas as pd
from zipline.finance.slippage import VolumeShareSlippage
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.finance.blotter import Order
class SlippageTestCase(TestCase):
def test_volume_share_slippage(self):
event = Event(
{'volume': 200,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'open': 3.0}
)
slippage_model = VolumeShareSlippage()
open_orders = [
Order(dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=100,
filled=0,
sid=133)
]
orders_txns = list(slippage_model.simulate(
event,
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.01875),
'dt': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'amount': int(50),
'sid': int(133),
'commission': None,
'type': DATASOURCE_TYPE.TRANSACTION,
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
# TODO: Make expected_txn an Transaction object and ensure there
# is a __eq__ for that class.
self.assertEquals(expected_txn, txn.__dict__)
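        # Note on the expected numbers: assuming the usual VolumeShareSlippage
        # defaults of a 0.25 volume limit and a 0.1 price-impact constant
        # (not set explicitly by this test), only 50 of the 100 shares fill
        # against the 200-share bar, the volume share is 50 / 200 = 0.25,
        # the impact is 0.1 * 0.25 ** 2 = 0.00625, and the buy fills at
        # 3.0 * (1 + 0.00625) = 3.01875, matching expected_txn above.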
def test_orders_limit(self):
events = self.gen_trades()
slippage_model = VolumeShareSlippage()
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.6})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 1)
txn = orders_txns[0][1]
expected_txn = {
'price': float(3.500875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(100),
'sid': int(133),
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
expected_txn = {}
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.4})
]
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.499125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-100),
'sid': int(133)
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
STOP_ORDER_CASES = {
# Stop orders can be long/short and have their price greater or
# less than the stop.
#
# A stop being reached is conditional on the order direction.
# Long orders reach the stop when the price is greater than the stop.
# Short orders reach the stop when the price is less than the stop.
#
# Which leads to the following 4 cases:
#
# | long | short |
# | price > stop | | |
# | price < stop | | |
#
# Currently the slippage module acts according to the following table,
# where 'X' represents triggering a transaction
# | long | short |
# | price > stop | | X |
# | price < stop | X | |
#
# However, the following behavior *should* be followed.
#
# | long | short |
# | price > stop | X | |
# | price < stop | | X |
'long | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 4.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 4.0,
'open': 3.5
},
'expected': {
'transaction': {
'price': 4.001,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': 100,
'sid': 133,
}
}
},
'long | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.6
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 4.0
},
'expected': {
'transaction': None
}
},
'short | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.4
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 3.0
},
'expected': {
'transaction': None
}
},
'short | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.0,
'open': 3.0
},
'expected': {
'transaction': {
'price': 2.99925,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': -100,
'sid': 133,
}
}
},
}
@parameterized.expand([
(name, case['order'], case['event'], case['expected'])
for name, case in STOP_ORDER_CASES.items()
])
def test_orders_stop(self, name, order_data, event_data, expected):
order = Order(**order_data)
event = Event(initial_values=event_data)
slippage_model = VolumeShareSlippage()
try:
_, txn = next(slippage_model.simulate(event, [order]))
except StopIteration:
txn = None
if expected['transaction'] is None:
self.assertIsNone(txn)
else:
self.assertIsNotNone(txn)
for key, value in expected['transaction'].items():
self.assertEquals(value, txn[key])
def test_orders_stop_limit(self):
events = self.gen_trades()
slippage_model = VolumeShareSlippage()
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.0})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.6})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.500875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(100),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 4.0})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 3.4})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.499125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-100),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
def gen_trades(self):
# create a sequence of trades
events = [
Event({
'volume': 2000,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'open': 3.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.5,
'datetime': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.5,
'dt':
datetime.datetime(2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'open': 3.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 4.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 33, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 4.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 33, tzinfo=pytz.utc),
'open': 3.5
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.5,
'datetime': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.5,
'dt':
datetime.datetime(2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'open': 4.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 35, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 35, tzinfo=pytz.utc),
'open': 3.5
})
]
return events
| apache-2.0 |
ssanderson/numpy | numpy/linalg/linalg.py | 1 | 78991 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
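# Note: in this lite wrapper the LAPACK computation dtype `t` is always
# double (or cdouble when any input is complex), while `result_type` only
# records whether the inputs were single- or double-precision so callers
# can cast the result back; e.g. two float32 inputs are solved in float64
# and the result is returned as float32.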
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
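# Note: after moving any requested `axes` to the end, tensorsolve collapses
# the trailing an - b.ndim dimensions of `a` into one axis of length
# prod(Q) and falls back to an ordinary 2-D solve; the docstring example
# reduces a (6, 4, 2, 3, 4) coefficient tensor to a (24, 24) matrix and
# reshapes the 24-element solution back to (2, 3, 4).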
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
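# Note: the dispatch above treats `b` as a stack of vectors only when it has
# exactly one dimension fewer than `a`, using the solve1 gufunc (signature
# (m,m),(m)->(m)); otherwise the matrix right-hand-side gufunc solve with
# signature (m,m),(m,n)->(m,n) is used.  The 'dd->d' / 'DD->D' strings just
# select the real or complex inner loop.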
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
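A rough numerical sketch of this two-step solve (illustrative only;
``np.linalg.solve`` is used for both triangular systems and does not
exploit their triangular structure):
>>> A = np.array([[4., 2.], [2., 3.]])
>>> b = np.array([1., 2.])
>>> L = np.linalg.cholesky(A)
>>> y = np.linalg.solve(L, b)           # solve L y = b
>>> x = np.linalg.solve(L.T.conj(), y)  # solve L.H x = y
>>> np.allclose(np.dot(A, x), b)
True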
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2)  # mode='r' returns the same r as the default 'reduced' mode
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}? (Graph the points
and you'll see that it should be y0 = 1, m = 0.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[m], [y0]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
        if t != result_t:
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
return empty(a.shape[-1:], dtype=result_t)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
return empty(a.shape[-1:], dtype=result_t)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
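As a rough illustrative check (a sketch only, not part of the algorithm
itself), left eigenvectors of `a` can be obtained as right eigenvectors
of ``a.T``:
>>> a = np.array([[1., 2.], [3., 4.]])
>>> wl, vl = np.linalg.eig(a.T)
>>> y = vl[:, 0]
>>> np.allclose(np.dot(y, a), wl[0] * y)
True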
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
w = empty(a.shape[-1:], dtype=result_t)
vt = empty(a.shape, dtype=result_t)
return w, wrap(vt)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.44721360-0.j , -0.89442719+0.j ],
[ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
[ 0.00000000-0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
w = empty(a.shape[-1:], dtype=result_t)
vt = empty(a.shape, dtype=result_t)
return w, wrap(vt)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
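A rough numerical check of this relationship (illustrative sketch only):
>>> a = np.random.randn(4, 3)
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> w = np.linalg.eigvalsh(np.dot(a.conj().T, a))
>>> np.allclose(np.sort(w), np.sort(s**2))
True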
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
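For instance (illustrative sketch only), the ``p=1`` case can be
reproduced directly from this definition:
>>> a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
>>> c1 = np.linalg.norm(a, 1) * np.linalg.norm(np.linalg.inv(a), 1)
>>> np.allclose(np.linalg.cond(a, 1), c1)
True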
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
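As a rough illustration (a sketch only, mirroring the default formula
described above):
>>> M = np.eye(4); M[-1, -1] = 0.
>>> S = np.linalg.svd(M, compute_uv=False)
>>> tol = S.max() * max(M.shape) * np.finfo(S.dtype).eps
>>> int(np.sum(S > tol))
3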
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
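A rough sketch reconstructing the pseudo-inverse from the SVD
(illustrative only; it assumes every singular value of `a` lies above
the cutoff):
>>> a = np.random.randn(9, 6)
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> B = np.dot(vt.T, np.dot(np.diag(1. / s), u.T))
>>> np.allclose(B, np.linalg.pinv(a))
True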
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
if _isEmpty2d(a):
# determinant of empty matrix is 1
sign = ones(a.shape[:-2], dtype=result_t)
logdet = zeros(a.shape[:-2], dtype=real_t)
return sign, logdet
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
# 0x0 matrices have determinant 1
if _isEmpty2d(a):
return ones(a.shape[:-2], dtype=result_t)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
# This line:
# * is incorrect, according to the LAPACK documentation
# * raises a ValueError if min(m,n) == 0
# * should not be calculated here anyway, as LAPACK should calculate
# `liwork` for us. But that only works if our version of lapack does
# not have this bug:
# http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
# Lapack_lite does have that bug...
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
        except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
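As a quick numerical check (illustrative sketch using the ``cost``
helper defined above):
>>> def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
>>> A = np.random.random((10, 100))
>>> B = np.random.random((100, 5))
>>> C = np.random.random((5, 50))
>>> cost(A, B) + cost(A.dot(B), C)
7500
>>> cost(B, C) + cost(A, B.dot(C))
75000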
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| bsd-3-clause |
dracos/QGIS | python/plugins/processing/algs/qgis/QGISAlgorithmProvider.py | 5 | 9868 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
try:
import matplotlib.pyplot
hasMatplotlib = True
except Exception:
hasMatplotlib = False
from PyQt4.QtGui import QIcon
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.script.ScriptUtils import ScriptUtils
from RegularPoints import RegularPoints
from SymmetricalDifference import SymmetricalDifference
from VectorSplit import VectorSplit
from VectorGrid import VectorGrid
from RandomExtract import RandomExtract
from RandomExtractWithinSubsets import RandomExtractWithinSubsets
from ExtractByLocation import ExtractByLocation
from PointsInPolygon import PointsInPolygon
from PointsInPolygonUnique import PointsInPolygonUnique
from PointsInPolygonWeighted import PointsInPolygonWeighted
from SumLines import SumLines
from BasicStatisticsNumbers import BasicStatisticsNumbers
from BasicStatisticsStrings import BasicStatisticsStrings
from NearestNeighbourAnalysis import NearestNeighbourAnalysis
from LinesIntersection import LinesIntersection
from MeanCoords import MeanCoords
from PointDistance import PointDistance
from UniqueValues import UniqueValues
from ReprojectLayer import ReprojectLayer
from ExportGeometryInfo import ExportGeometryInfo
from Centroids import Centroids
from Delaunay import Delaunay
from VoronoiPolygons import VoronoiPolygons
from DensifyGeometries import DensifyGeometries
from MultipartToSingleparts import MultipartToSingleparts
from SimplifyGeometries import SimplifyGeometries
from LinesToPolygons import LinesToPolygons
from PolygonsToLines import PolygonsToLines
from SinglePartsToMultiparts import SinglePartsToMultiparts
from ExtractNodes import ExtractNodes
from ConvexHull import ConvexHull
from FixedDistanceBuffer import FixedDistanceBuffer
from VariableDistanceBuffer import VariableDistanceBuffer
from Clip import Clip
from Difference import Difference
from Dissolve import Dissolve
from Intersection import Intersection
from ExtentFromLayer import ExtentFromLayer
from RandomSelection import RandomSelection
from RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from SelectByLocation import SelectByLocation
from Union import Union
from DensifyGeometriesInterval import DensifyGeometriesInterval
from Eliminate import Eliminate
from SpatialJoin import SpatialJoin
from DeleteColumn import DeleteColumn
from DeleteHoles import DeleteHoles
from DeleteDuplicateGeometries import DeleteDuplicateGeometries
from TextToFloat import TextToFloat
from ExtractByAttribute import ExtractByAttribute
from SelectByAttribute import SelectByAttribute
from Grid import Grid
from Gridify import Gridify
from HubDistance import HubDistance
from HubLines import HubLines
from Merge import Merge
from GeometryConvert import GeometryConvert
from ConcaveHull import ConcaveHull
from Polygonize import Polygonize
from RasterLayerStatistics import RasterLayerStatistics
from StatisticsByCategories import StatisticsByCategories
from EquivalentNumField import EquivalentNumField
from AddTableField import AddTableField
from FieldsCalculator import FieldsCalculator
from SaveSelectedFeatures import SaveSelectedFeatures
from Explode import Explode
from AutoincrementalField import AutoincrementalField
from FieldPyculator import FieldsPyculator
from JoinAttributes import JoinAttributes
from CreateConstantRaster import CreateConstantRaster
from PointsLayerFromTable import PointsLayerFromTable
from PointsDisplacement import PointsDisplacement
from ZonalStatistics import ZonalStatistics
from PointsFromPolygons import PointsFromPolygons
from PointsFromLines import PointsFromLines
from RandomPointsExtent import RandomPointsExtent
from RandomPointsLayer import RandomPointsLayer
from RandomPointsPolygonsFixed import RandomPointsPolygonsFixed
from RandomPointsPolygonsVariable import RandomPointsPolygonsVariable
from RandomPointsAlongLines import RandomPointsAlongLines
from PointsToPaths import PointsToPaths
from PostGISExecuteSQL import PostGISExecuteSQL
from ImportIntoPostGIS import ImportIntoPostGIS
from SetVectorStyle import SetVectorStyle
from SetRasterStyle import SetRasterStyle
from SelectByExpression import SelectByExpression
from SelectByAttributeSum import SelectByAttributeSum
from HypsometricCurves import HypsometricCurves
from SplitLinesWithLines import SplitLinesWithLines
from FieldsMapper import FieldsMapper
from Datasources2Vrt import Datasources2Vrt
from CheckValidity import CheckValidity
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class QGISAlgorithmProvider(AlgorithmProvider):
_icon = QIcon(os.path.join(pluginPath, 'images', 'qgis.png'))
def __init__(self):
AlgorithmProvider.__init__(self)
self.alglist = [SumLines(), PointsInPolygon(),
PointsInPolygonWeighted(), PointsInPolygonUnique(),
BasicStatisticsStrings(), BasicStatisticsNumbers(),
NearestNeighbourAnalysis(), MeanCoords(),
LinesIntersection(), UniqueValues(), PointDistance(),
ReprojectLayer(), ExportGeometryInfo(), Centroids(),
Delaunay(), VoronoiPolygons(), SimplifyGeometries(),
DensifyGeometries(), DensifyGeometriesInterval(),
MultipartToSingleparts(), SinglePartsToMultiparts(),
PolygonsToLines(), LinesToPolygons(), ExtractNodes(),
Eliminate(), ConvexHull(), FixedDistanceBuffer(),
VariableDistanceBuffer(), Dissolve(), Difference(),
Intersection(), Union(), Clip(), ExtentFromLayer(),
RandomSelection(), RandomSelectionWithinSubsets(),
SelectByLocation(), RandomExtract(), DeleteHoles(),
RandomExtractWithinSubsets(), ExtractByLocation(),
SpatialJoin(), RegularPoints(), SymmetricalDifference(),
VectorSplit(), VectorGrid(), DeleteColumn(),
DeleteDuplicateGeometries(), TextToFloat(),
ExtractByAttribute(), SelectByAttribute(), Grid(),
Gridify(), HubDistance(), HubLines(), Merge(),
GeometryConvert(), AddTableField(), FieldsCalculator(),
SaveSelectedFeatures(), JoinAttributes(),
AutoincrementalField(), Explode(), FieldsPyculator(),
EquivalentNumField(), PointsLayerFromTable(),
StatisticsByCategories(), ConcaveHull(), Polygonize(),
RasterLayerStatistics(), PointsDisplacement(),
ZonalStatistics(), PointsFromPolygons(),
PointsFromLines(), RandomPointsExtent(),
RandomPointsLayer(), RandomPointsPolygonsFixed(),
RandomPointsPolygonsVariable(),
RandomPointsAlongLines(), PointsToPaths(),
PostGISExecuteSQL(), ImportIntoPostGIS(),
SetVectorStyle(), SetRasterStyle(),
SelectByExpression(), HypsometricCurves(),
SplitLinesWithLines(), CreateConstantRaster(),
FieldsMapper(),SelectByAttributeSum(), Datasources2Vrt(),
CheckValidity()
]
if hasMatplotlib:
from VectorLayerHistogram import VectorLayerHistogram
from RasterLayerHistogram import RasterLayerHistogram
from VectorLayerScatterplot import VectorLayerScatterplot
from MeanAndStdDevPlot import MeanAndStdDevPlot
from BarPlot import BarPlot
from PolarPlot import PolarPlot
self.alglist.extend([
VectorLayerHistogram(), RasterLayerHistogram(),
VectorLayerScatterplot(), MeanAndStdDevPlot(), BarPlot(),
PolarPlot(),
])
folder = os.path.join(os.path.dirname(__file__), 'scripts')
scripts = ScriptUtils.loadFromFolder(folder)
for script in scripts:
script.allowEdit = False
self.alglist.extend(scripts)
for alg in self.alglist:
alg._icon = self._icon
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return 'qgis'
def getDescription(self):
return self.tr('QGIS geoalgorithms')
def getIcon(self):
return self._icon
def _loadAlgorithms(self):
self.algs = self.alglist
def supportsNonFileBasedOutput(self):
return True
| gpl-2.0 |
OshynSong/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
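# Illustrative usage sketch (comments only; not part of the scikit-learn API surface): with
# input_type="string" each token carries an implicit value of 1, so raw token lists can be
# hashed directly.
#
#     h = FeatureHasher(n_features=8, input_type="string")
#     X = h.transform([["dog", "cat", "cat"], ["dog", "run"]])
#     # X is a 2 x 8 scipy.sparse CSR matrix; the repeated "cat" token sums into one column.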
| bsd-3-clause |
jangorecki/h2o-3 | h2o-py/tests/pyunit_utils/utilsPY.py | 1 | 149810 | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.builtins import basestring
import sys, os
try: # works with python 2.7 not 3
from StringIO import StringIO
except: # works with python 3
from io import StringIO
sys.path.insert(1, "../../")
import h2o
import imp
import random
import re
import subprocess
from subprocess import STDOUT,PIPE
from h2o.utils.shared_utils import temp_ctr
from h2o.model.binomial import H2OBinomialModel
from h2o.model.clustering import H2OClusteringModel
from h2o.model.multinomial import H2OMultinomialModel
from h2o.model.regression import H2ORegressionModel
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.kmeans import H2OKMeansEstimator
from h2o.transforms.decomposition import H2OPCA
from h2o.estimators.naive_bayes import H2ONaiveBayesEstimator
from decimal import *
import urllib.request, urllib.error, urllib.parse
import numpy as np
import shutil
import string
import copy
import json
import math
from random import shuffle
def check_models(model1, model2, use_cross_validation=False, op='e'):
"""
Check that the given models are equivalent.
:param model1:
:param model2:
:param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use
training metrics.
:param op: comparison operator to use. 'e':==, 'g':>, 'ge':>=
:return: None. Throw meaningful error messages if the check fails
"""
# 1. Check model types
model1_type = model1.__class__.__name__
    model2_type = model2.__class__.__name__
    assert model1_type == model2_type, "The model types differ. The first model is of type {0} and the second " \
                                       "model is of type {1}.".format(model1_type, model2_type)
# 2. Check model metrics
if isinstance(model1,H2OBinomialModel): # 2a. Binomial
# F1
f1_1 = model1.F1(xval=use_cross_validation)
f1_2 = model2.F1(xval=use_cross_validation)
if op == 'e': assert f1_1[0][1] == f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be == to the second.".format(f1_1[0][1], f1_2[0][1])
elif op == 'g': assert f1_1[0][1] > f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be > than the second.".format(f1_1[0][1], f1_2[0][1])
elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
"{1}. Expected the first to be >= than the second.".format(f1_1[0][1], f1_2[0][1])
elif isinstance(model1,H2ORegressionModel): # 2b. Regression
# MSE
mse1 = model1.mse(xval=use_cross_validation)
mse2 = model2.mse(xval=use_cross_validation)
if op == 'e': assert mse1 == mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be == to the second.".format(mse1, mse2)
elif op == 'g': assert mse1 > mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be > than the second.".format(mse1, mse2)
elif op == 'ge': assert mse1 >= mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
"{1}. Expected the first to be >= than the second.".format(mse1, mse2)
elif isinstance(model1,H2OMultinomialModel): # 2c. Multinomial
# hit-ratio
pass
elif isinstance(model1,H2OClusteringModel): # 2d. Clustering
# totss
totss1 = model1.totss(xval=use_cross_validation)
totss2 = model2.totss(xval=use_cross_validation)
if op == 'e': assert totss1 == totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be == to the second.".format(totss1,
totss2)
elif op == 'g': assert totss1 > totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be > than the second.".format(totss1,
totss2)
elif op == 'ge': assert totss1 >= totss2, "The first model has an TOTSS of {0} and the second model has an " \
"TOTSS of {1}. Expected the first to be >= than the second." \
"".format(totss1, totss2)
def check_dims_values(python_obj, h2o_frame, rows, cols, dim_only=False):
"""
Check that the dimensions and values of the python object and H2OFrame are equivalent. Assumes that the python
object conforms to the rules specified in the h2o frame documentation.
    :param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, or pandas.DataFrame
:param h2o_frame: an H2OFrame
:param rows: number of rows
:param cols: number of columns
:param dim_only: check the dimensions only
:return: None
"""
h2o_rows, h2o_cols = h2o_frame.dim
assert h2o_rows == rows and h2o_cols == cols, "failed dim check! h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}" \
"".format(h2o_rows, rows, h2o_cols, cols)
if not dim_only:
if isinstance(python_obj, (list, tuple)):
for c in range(cols):
for r in range(rows):
pval = python_obj[r]
if isinstance(pval, (list, tuple)): pval = pval[c]
hval = h2o_frame[r, c]
assert pval == hval or abs(pval - hval) < 1e-10, \
"expected H2OFrame to have the same values as the python object for row {0} " \
"and column {1}, but h2o got {2} and python got {3}.".format(r, c, hval, pval)
elif isinstance(python_obj, dict):
for r in range(rows):
for k in list(python_obj.keys()):
pval = python_obj[k][r] if hasattr(python_obj[k],'__iter__') else python_obj[k]
hval = h2o_frame[r,k]
assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} " \
"and column {1}, but h2o got {2} and python got {3}.".format(r, k, hval, pval)
def np_comparison_check(h2o_data, np_data, num_elements):
"""
Check values achieved by h2o against values achieved by numpy
:param h2o_data: an H2OFrame or H2OVec
:param np_data: a numpy array
:param num_elements: number of elements to compare
:return: None
"""
# Check for numpy
try:
imp.find_module('numpy')
except ImportError:
assert False, "failed comparison check because unable to import numpy"
import numpy as np
rows, cols = h2o_data.dim
for i in range(num_elements):
r = random.randint(0,rows-1)
c = random.randint(0,cols-1)
h2o_val = h2o_data[r,c]
np_val = np_data[r,c] if len(np_data.shape) > 1 else np_data[r]
if isinstance(np_val, np.bool_): np_val = bool(np_val) # numpy haz special bool type :(
assert np.absolute(h2o_val - np_val) < 1e-5, \
"failed comparison check! h2o computed {0} and numpy computed {1}".format(h2o_val, np_val)
def javapredict(algo, equality, train, test, x, y, compile_only=False, **kwargs):
print("Creating model in H2O")
if algo == "gbm": model = H2OGradientBoostingEstimator(**kwargs)
elif algo == "random_forest": model = H2ORandomForestEstimator(**kwargs)
elif algo == "deeplearning": model = H2ODeepLearningEstimator(**kwargs)
elif algo == "glm": model = H2OGeneralizedLinearEstimator(**kwargs)
elif algo == "naive_bayes": model = H2ONaiveBayesEstimator(**kwargs)
elif algo == "kmeans": model = H2OKMeansEstimator(**kwargs)
elif algo == "pca": model = H2OPCA(**kwargs)
else: raise ValueError
if algo == "kmeans" or algo == "pca": model.train(x=x, training_frame=train)
else: model.train(x=x, y=y, training_frame=train)
print(model)
# HACK: munge model._id so that it conforms to Java class name. For example, change K-means to K_means.
# TODO: clients should extract Java class name from header.
regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
pojoname = regex.sub("_", model._id)
print("Downloading Java prediction model code from H2O")
tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname))
os.makedirs(tmpdir)
h2o.download_pojo(model, path=tmpdir)
h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar)
print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar))
java_file = os.path.join(tmpdir, pojoname + ".java")
assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file)
print("java code saved in {0}".format(java_file))
print("Compiling Java Pojo")
javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file]
subprocess.check_call(javac_cmd)
if not compile_only:
print("Predicting in H2O")
predictions = model.predict(test)
predictions.summary()
predictions.head()
out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv")
h2o.download_csv(predictions, out_h2o_csv)
assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv)
print("H2O Predictions saved in {0}".format(out_h2o_csv))
print("Setting up for Java POJO")
in_csv = os.path.join(tmpdir, "in.csv")
h2o.download_csv(test[x], in_csv)
# hack: the PredictCsv driver can't handle quoted strings, so remove them
f = open(in_csv, "r+")
csv = f.read()
csv = re.sub('\"', "", csv)
f.seek(0)
f.write(csv)
f.truncate()
f.close()
assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv)
print("Input CSV to PredictCsv saved in {0}".format(in_csv))
print("Running PredictCsv Java Program")
out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
cp_sep = ";" if sys.platform == "win32" else ":"
java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g",
"-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv",
"--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv]
p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
o, e = p.communicate()
print("Java output: {0}".format(o))
assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
predictions2 = h2o.upload_file(path=out_pojo_csv)
print("Pojo predictions saved in {0}".format(out_pojo_csv))
print("Comparing predictions between H2O and Java POJO")
# Dimensions
hr, hc = predictions.dim
pr, pc = predictions2.dim
assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr)
assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc)
# Value
for r in range(hr):
hp = predictions[r, 0]
if equality == "numeric":
pp = float.fromhex(predictions2[r, 0])
assert abs(hp - pp) < 1e-4, \
"Expected predictions to be the same (within 1e-4) for row %d, but got %r and %r" % (r, hp, pp)
elif equality == "class":
pp = predictions2[r, 0]
assert hp == pp, "Expected predictions to be the same for row %d, but got %r and %r" % (r, hp, pp)
else:
raise ValueError
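# Hedged call sketch for javapredict (illustrative addition, never called by the tests; the
# train/test frames and column names are placeholders).
def _example_javapredict_usage(train, test, predictors, response):
    """Round-trip a small GBM through its POJO and require numeric predictions to match the
    in-H2O predictions within 1e-4."""
    javapredict("gbm", "numeric", train, test, predictors, response, ntrees=10)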
def javamunge(assembly, pojoname, test, compile_only=False):
"""
Here's how to use:
assembly is an already fit H2OAssembly;
The test set should be used to compare the output here and the output of the POJO.
"""
print("Downloading munging POJO code from H2O")
tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname))
os.makedirs(tmpdir)
assembly.to_pojo(pojoname, path=tmpdir, get_jar=True)
h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar)
print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar))
java_file = os.path.join(tmpdir, pojoname + ".java")
assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file)
print("java code saved in {0}".format(java_file))
print("Compiling Java Pojo")
javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file]
subprocess.check_call(javac_cmd)
if not compile_only:
print("Setting up for Java POJO")
in_csv = os.path.join(tmpdir, "in.csv")
h2o.download_csv(test, in_csv)
assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv)
print("Input CSV to mungedCSV saved in {0}".format(in_csv))
print("Predicting in H2O")
munged = assembly.fit(test)
munged.head()
out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv")
h2o.download_csv(munged, out_h2o_csv)
assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv)
print("Munged frame saved in {0}".format(out_h2o_csv))
print("Running PredictCsv Java Program")
out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
cp_sep = ";" if sys.platform == "win32" else ":"
java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g",
"-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.MungeCsv", "--header", "--munger", pojoname,
"--input", in_csv, "--output", out_pojo_csv]
print("JAVA COMMAND: " + " ".join(java_cmd))
p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
o, e = p.communicate()
print("Java output: {0}".format(o))
assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
munged2 = h2o.upload_file(path=out_pojo_csv, col_types=test.types)
print("Pojo predictions saved in {0}".format(out_pojo_csv))
print("Comparing predictions between H2O and Java POJO")
# Dimensions
hr, hc = munged.dim
pr, pc = munged2.dim
assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr)
assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc)
# Value
import math
import numbers
munged.show()
munged2.show()
for r in range(hr):
for c in range(hc):
hp = munged[r,c]
pp = munged2[r,c]
if isinstance(hp, numbers.Number):
assert isinstance(pp, numbers.Number)
assert (math.fabs(hp-pp) < 1e-8) or (math.isnan(hp) and math.isnan(pp)), "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp)
else:
assert hp==pp, "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp)
def locate(path):
"""
Search for a relative path and turn it into an absolute path.
This is handy when hunting for data files to be passed into h2o and used by import file.
Note: This function is for unit testing purposes only.
Parameters
----------
path : str
Path to search for
:return: Absolute path if it is found. None otherwise.
"""
if (test_is_on_hadoop()):
# Jenkins jobs create symbolic links to smalldata and bigdata on the machine that starts the test. However,
# in an h2o multinode hadoop cluster scenario, the clustered machines don't know about the symbolic link.
# Consequently, `locate` needs to return the actual path to the data on the clustered machines. ALL jenkins
# machines store smalldata and bigdata in /home/0xdiag/. If ON.HADOOP is set by the run.py, the path arg MUST
# be an immediate subdirectory of /home/0xdiag/. Moreover, the only guaranteed subdirectories of /home/0xdiag/
# are smalldata and bigdata.
p = os.path.realpath(os.path.join("/home/0xdiag/", path))
if not os.path.exists(p): raise ValueError("File not found: " + path)
return p
else:
tmp_dir = os.path.realpath(os.getcwd())
possible_result = os.path.join(tmp_dir, path)
while (True):
if (os.path.exists(possible_result)):
return possible_result
next_tmp_dir = os.path.dirname(tmp_dir)
if (next_tmp_dir == tmp_dir):
raise ValueError("File not found: " + path)
tmp_dir = next_tmp_dir
possible_result = os.path.join(tmp_dir, path)
def hadoop_namenode_is_accessible():
url = "http://{0}:50070".format(hadoop_namenode())
try:
        urllib.request.urlopen(url)
internal = True
except:
internal = False
return internal
def test_is_on_hadoop():
if hasattr(sys.modules["tests.pyunit_utils"], '__on_hadoop__'):
return sys.modules["tests.pyunit_utils"].__on_hadoop__
return False
def hadoop_namenode():
if os.getenv("NAME_NODE"):
return os.getenv("NAME_NODE").split(".")[0]
elif hasattr(sys.modules["tests.pyunit_utils"], '__hadoop_namenode__'):
return sys.modules["tests.pyunit_utils"].__hadoop_namenode__
return None
def pyunit_exec(test_name):
with open(test_name, "r") as t: pyunit = t.read()
pyunit_c = compile(pyunit, os.path.abspath(test_name), 'exec')
exec(pyunit_c, {})
def standalone_test(test):
h2o.init(strict_version_check=False)
h2o.remove_all()
h2o.log_and_echo("------------------------------------------------------------")
h2o.log_and_echo("")
h2o.log_and_echo("STARTING TEST")
h2o.log_and_echo("")
h2o.log_and_echo("------------------------------------------------------------")
test()
def make_random_grid_space(algo, ncols=None, nrows=None):
"""
Construct a dictionary of the form {gbm_parameter:list_of_values, ...}, which will eventually be passed to
H2OGridSearch to build a grid object. The gbm parameters, and their associated values, are randomly selected.
:param algo: a string {"gbm", "rf", "dl", "km", "glm"} representing the algo dimension of the grid space
:param ncols: Used for mtries selection or k (pca)
:param nrows: Used for k (pca)
:return: a dictionary of parameter_name:list_of_values
"""
grid_space = {}
if algo in ["gbm", "rf"]:
if random.randint(0,1): grid_space['ntrees'] = random.sample(list(range(1,6)),random.randint(2,3))
if random.randint(0,1): grid_space['max_depth'] = random.sample(list(range(1,6)),random.randint(2,3))
if random.randint(0,1): grid_space['min_rows'] = random.sample(list(range(1,11)),random.randint(2,3))
if random.randint(0,1): grid_space['nbins'] = random.sample(list(range(2,21)),random.randint(2,3))
if random.randint(0,1): grid_space['nbins_cats'] = random.sample(list(range(2,1025)),random.randint(2,3))
if algo == "gbm":
if random.randint(0,1): grid_space['learn_rate'] = [random.random() for _ in range(random.randint(2,3))]
grid_space['distribution'] = random.sample(['bernoulli', 'multinomial', 'gaussian', 'poisson', 'tweedie', 'gamma'], 1)
if algo == "rf":
if random.randint(0,1): grid_space['mtries'] = random.sample(list(range(1,ncols+1)),random.randint(2,3))
if random.randint(0,1): grid_space['sample_rate'] = [random.random() for r in range(random.randint(2,3))]
elif algo == "km":
grid_space['k'] = random.sample(list(range(1,10)),random.randint(2,3))
if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['standardize'] = [True, False]
if random.randint(0,1): grid_space['seed'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['init'] = random.sample(['Random','PlusPlus','Furthest'],random.randint(2,3))
elif algo == "glm":
if random.randint(0,1): grid_space['alpha'] = [random.random() for r in range(random.randint(2,3))]
grid_space['family'] = random.sample(['binomial','gaussian','poisson','tweedie','gamma'], 1)
        if 'tweedie' in grid_space['family']:
            if random.randint(0,1):
                grid_space['tweedie_variance_power'] = [round(random.random()+1,6) for r in range(random.randint(2,3))]
                grid_space['tweedie_link_power'] = [1 - p for p in grid_space['tweedie_variance_power']]
elif algo == "dl":
if random.randint(0,1): grid_space['activation'] = \
random.sample(["Rectifier", "Tanh", "TanhWithDropout", "RectifierWithDropout", "MaxoutWithDropout"],
random.randint(2,3))
if random.randint(0,1): grid_space['l2'] = [0.001*random.random() for r in range(random.randint(2,3))]
grid_space['distribution'] = random.sample(['bernoulli','multinomial','gaussian','poisson','tweedie','gamma'],1)
return grid_space
elif algo == "naiveBayes":
grid_space['laplace'] = 0
if random.randint(0,1): grid_space['laplace'] = [round(random.random() + r, 6) for r in random.sample(list(range(0,11)), random.randint(2,3))]
if random.randint(0,1): grid_space['min_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]
if random.randint(0,1): grid_space['eps_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]
elif algo == "pca":
if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))
if random.randint(0,1): grid_space['transform'] = random.sample(["NONE","STANDARDIZE","NORMALIZE","DEMEAN","DESCALE"], random.randint(2,3))
grid_space['k'] = random.sample(list(range(1,min(ncols,nrows))),random.randint(2,3))
else:
raise ValueError
return grid_space
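# Hedged usage sketch for make_random_grid_space (illustrative addition, never called by the
# tests; the H2OGridSearch import is local to this sketch and the frame/column names are
# placeholders).
def _example_random_grid_space_usage(train, predictors, response):
    """Feed a randomly generated GBM hyper-parameter space to H2OGridSearch."""
    from h2o.grid.grid_search import H2OGridSearch
    hyper_params = make_random_grid_space("gbm")
    grid = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params=hyper_params)
    grid.train(x=predictors, y=response, training_frame=train)
    return grid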
# Validate given models' parameters against expected values
def expect_model_param(models, attribute_name, expected_values):
print("param: {0}".format(attribute_name))
actual_values = list(set([m.params[attribute_name]['actual'] \
if type(m.params[attribute_name]['actual']) != list
else m.params[attribute_name]['actual'][0] for m in models.models]))
# possible for actual to be a list (GLM)
if type(expected_values) != list:
expected_values = [expected_values]
# limit precision. Rounding happens in some models like RF
actual_values = [x if isinstance(x,basestring) else round(float(x),5) for x in actual_values]
expected_values = [x if isinstance(x,basestring) else round(float(x),5) for x in expected_values]
print("actual values: {0}".format(actual_values))
print("expected values: {0}".format(expected_values))
actual_values_len = len(actual_values)
expected_values_len = len(expected_values)
assert actual_values_len == expected_values_len, "Expected values len: {0}. Actual values len: " \
"{1}".format(expected_values_len, actual_values_len)
actual_values = sorted(actual_values)
expected_values = sorted(expected_values)
for i in range(len(actual_values)):
if isinstance(actual_values[i], float):
            assert abs(actual_values[i]-expected_values[i]) < 1.1e-5, "Too large of a difference between actual and " \
"expected value. Actual value: {}. Expected value: {}"\
.format(actual_values[i], expected_values[i])
else:
assert actual_values[i] == expected_values[i], "Expected: {}. Actual: {}"\
.format(expected_values[i], actual_values[i])
def rest_ctr():
return h2o.connection().requests_count
def write_syn_floating_point_dataset_glm(csv_training_data_filename, csv_validation_data_filename,
csv_test_data_filename, csv_weight_name, row_count, col_count, data_type,
max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,
valid_row_count, test_row_count, class_number=2,
class_method=('probability', 'probability', 'probability'),
class_margin=[0.0, 0.0, 0.0]):
"""
Generate random data sets to test the GLM algo using the following steps:
1. randomly generate the intercept and weight vector;
2. generate a set of predictors X;
3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random
Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X
is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the
relationship between the response Y (K possible classes) and predictor vector X is assumed to be
Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e))
:param csv_training_data_filename: string representing full path filename to store training data set. Set to
null string if no training data set is to be generated.
:param csv_validation_data_filename: string representing full path filename to store validation data set. Set to
null string if no validation data set is to be generated.
:param csv_test_data_filename: string representing full path filename to store test data set. Set to null string if
no test data set is to be generated.
:param csv_weight_name: string representing full path filename to store intercept and weight used to generate
all data sets.
:param row_count: integer representing number of samples (predictor, response) in training data set
:param col_count: integer representing the number of predictors in the data set
:param data_type: integer representing the type of predictors or weights (1: integers, 2: real)
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param max_w_value: integer representing maximum intercept/weight values
:param min_w_value: integer representing minimum intercept/weight values
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param valid_row_count: integer representing number of samples (predictor, response) in validation data set
:param test_row_count: integer representing number of samples (predictor, response) in test data set
:param class_number: integer, optional, representing number of classes for binomial and multinomial
:param class_method: string tuple, optional, describing how we derive the final response from the class
probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively.
If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set
to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in margin. If the maximum class probability fails
to be greater by the margin than the second highest class probability, the data sample is discarded.
:param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability in order for us to keep the data sample for
training/validation/test data set respectively. This field is only meaningful if class_method is set to
'threshold'
:return: None
"""
# generate bias b and weight as a column vector
weights = generate_weights_glm(csv_weight_name, col_count, data_type, min_w_value, max_w_value,
family_type=family_type, class_number=class_number)
# generate training data set
if len(csv_training_data_filename) > 0:
generate_training_set_glm(csv_training_data_filename, row_count, col_count, min_p_value, max_p_value, data_type,
family_type, noise_std, weights,
class_method=class_method[0], class_margin=class_margin[0])
# generate validation data set
if len(csv_validation_data_filename) > 0:
generate_training_set_glm(csv_validation_data_filename, valid_row_count, col_count, min_p_value, max_p_value,
data_type, family_type, noise_std, weights,
class_method=class_method[1], class_margin=class_margin[1])
# generate test data set
if len(csv_test_data_filename) > 0:
generate_training_set_glm(csv_test_data_filename, test_row_count, col_count, min_p_value, max_p_value,
data_type, family_type, noise_std, weights,
class_method=class_method[2], class_margin=class_margin[2])
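# Hedged usage sketch for the generator above (illustrative addition, never called by the
# tests; the file names are placeholders): write a small gaussian training set plus its weight
# file, skipping the validation and test sets by passing empty file names.
def _example_write_syn_gaussian_dataset(csv_train="syn_train.csv", csv_weight="syn_weights.csv"):
    write_syn_floating_point_dataset_glm(csv_train, "", "", csv_weight,
                                         row_count=100, col_count=5, data_type=2,
                                         max_p_value=10, min_p_value=-10,
                                         max_w_value=2, min_w_value=-2,
                                         noise_std=0.1, family_type="gaussian",
                                         valid_row_count=0, test_row_count=0)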
def write_syn_mixed_dataset_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot,
csv_validation_data_filename, csv_validation_filename_true_one_hot,
csv_test_data_filename, csv_test_filename_true_one_hot, csv_weight_filename, row_count,
col_count, max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,
valid_row_count, test_row_count, enum_col, enum_level_vec, class_number=2,
class_method=['probability', 'probability', 'probability'],
class_margin=[0.0, 0.0, 0.0]):
"""
This function differs from write_syn_floating_point_dataset_glm in one small point. The predictors in this case
contains categorical data as well as real data.
Generate random data sets to test the GLM algo using the following steps:
1. randomly generate the intercept and weight vector;
2. generate a set of predictors X;
3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random
Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X
is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the
relationship between the response Y (K possible classes) and predictor vector X is assumed to be
Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e))
:param csv_training_data_filename: string representing full path filename to store training data set. Set to null
string if no training data set is to be generated.
:param csv_training_data_filename_true_one_hot: string representing full path filename to store training data set
with true one-hot encoding. Set to null string if no training data set is to be generated.
:param csv_validation_data_filename: string representing full path filename to store validation data set. Set to
null string if no validation data set is to be generated.
:param csv_validation_filename_true_one_hot: string representing full path filename to store validation data set
with true one-hot. Set to null string if no validation data set is to be generated.
:param csv_test_data_filename: string representing full path filename to store test data set. Set to null
string if no test data set is to be generated.
:param csv_test_filename_true_one_hot: string representing full path filename to store test data set with true
one-hot encoding. Set to null string if no test data set is to be generated.
:param csv_weight_filename: string representing full path filename to store intercept and weight used to generate
all data sets.
:param row_count: integer representing number of samples (predictor, response) in training data set
:param col_count: integer representing the number of predictors in the data set
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param max_w_value: integer representing maximum intercept/weight values
:param min_w_value: integer representing minimum intercept/weight values
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param valid_row_count: integer representing number of samples (predictor, response) in validation data set
:param test_row_count: integer representing number of samples (predictor, response) in test data set
:param enum_col: integer representing actual number of categorical columns in data set
:param enum_level_vec: vector containing maximum integer value for each categorical column
    :param class_number: integer, optional, representing the number of classes for binomial and multinomial
:param class_method: string tuple, optional, describing how we derive the final response from the class
probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively.
If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set
to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in margin. If the maximum class probability fails
to be greater by margin than the second highest class probability, the data sample is discarded.
:param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability by in order for us to keep the data sample for
training/validation/test data set respectively. This field is only meaningful if class_method is set to
'threshold'
:return: None
"""
    # add the column count of the encoded categorical predictors. If the maximum value for an enum is 3, it has
    # 4 levels, hence 4 bits are used to encode it with true one-hot encoding. That is why we add 1 bit per
    # categorical column added to our predictors
new_col_count = col_count - enum_col + sum(enum_level_vec) + enum_level_vec.shape[0]
# generate the weights to be applied to the training/validation/test data sets
# this is for true one hot encoding. For reference+one hot encoding, will skip
# few extra weights
weights = generate_weights_glm(csv_weight_filename, new_col_count, 2, min_w_value, max_w_value,
family_type=family_type, class_number=class_number)
# generate training data set
if len(csv_training_data_filename) > 0:
generate_training_set_mixed_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot, row_count,
col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col,
enum_level_vec, class_number=class_number,
class_method=class_method[0], class_margin=class_margin[0])
# generate validation data set
if len(csv_validation_data_filename) > 0:
generate_training_set_mixed_glm(csv_validation_data_filename, csv_validation_filename_true_one_hot,
valid_row_count, col_count, min_p_value, max_p_value, family_type, noise_std,
weights, enum_col, enum_level_vec, class_number=class_number,
class_method=class_method[1], class_margin=class_margin[1])
# generate test data set
if len(csv_test_data_filename) > 0:
generate_training_set_mixed_glm(csv_test_data_filename, csv_test_filename_true_one_hot, test_row_count,
col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col,
enum_level_vec, class_number=class_number,
class_method=class_method[2], class_margin=class_margin[2])
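# Worked illustration of the encoded-column arithmetic above (comments only): with col_count=5
# predictors of which enum_col=2 are categorical and enum_level_vec=[3, 2] (i.e. 4 and 3 levels
# under true one-hot encoding), new_col_count = 5 - 2 + (3 + 2) + 2 = 10 encoded predictor
# columns, and generate_weights_glm then adds one more entry for the intercept.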
def generate_weights_glm(csv_weight_filename, col_count, data_type, min_w_value, max_w_value, family_type='gaussian',
class_number=2):
"""
Generate random intercept and weight vectors (integer or real) for GLM algo and save
the values in a file specified by csv_weight_filename.
:param csv_weight_filename: string representing full path filename to store intercept and weight used to generate
all data set
:param col_count: integer representing the number of predictors in the data set
:param data_type: integer representing the type of predictors or weights (1: integers, 2: real)
:param max_w_value: integer representing maximum intercept/weight values
:param min_w_value: integer representing minimum intercept/weight values
:param family_type: string ,optional, represents the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
    :param class_number: integer, optional, representing the number of classes for binomial and multinomial
:return: column vector of size 1+colCount representing intercept and weight or matrix of size
1+colCount by class_number
"""
# first generate random intercept and weight
if 'gaussian' in family_type.lower():
if data_type == 1: # generate random integer intercept/weight
weight = np.random.random_integers(min_w_value, max_w_value, [col_count+1, 1])
elif data_type == 2: # generate real intercept/weights
weight = np.random.uniform(min_w_value, max_w_value, [col_count+1, 1])
else:
print("dataType must be 1 or 2 for now.")
sys.exit(1)
elif ('binomial' in family_type.lower()) or ('multinomial' in family_type.lower()):
if 'binomial' in family_type.lower(): # for binomial, only need 1 set of weight
class_number -= 1
if class_number <= 0:
print("class_number must be >= 2!")
sys.exit(1)
if isinstance(col_count, np.ndarray):
temp_col_count = col_count[0]
else:
temp_col_count = col_count
if data_type == 1: # generate random integer intercept/weight
weight = np.random.random_integers(min_w_value, max_w_value, [temp_col_count+1, class_number])
elif data_type == 2: # generate real intercept/weights
weight = np.random.uniform(min_w_value, max_w_value, [temp_col_count+1, class_number])
else:
print("dataType must be 1 or 2 for now.")
sys.exit(1)
# save the generated intercept and weight
np.savetxt(csv_weight_filename, weight.transpose(), delimiter=",")
return weight
def generate_training_set_glm(csv_filename, row_count, col_count, min_p_value, max_p_value, data_type, family_type,
noise_std, weight, class_method='probability', class_margin=0.0):
"""
Generate supervised data set given weights for the GLM algo. First randomly generate the predictors, then
call function generate_response_glm to generate the corresponding response y using the formula: y = w^T x+b+e
where T is transpose, e is a random Gaussian noise added. For the Binomial family, the relationship between
the response Y and predictor vector X is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)).
For the Multinomial family, the relationship between the response Y (K possible classes) and predictor vector
X is assumed to be Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e)). The predictors and
responses are saved in a file specified by csv_filename.
:param csv_filename: string representing full path filename to store supervised data set
:param row_count: integer representing the number of training samples in the data set
:param col_count: integer representing the number of predictors in the data set
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param data_type: integer representing the type of predictors or weights (1: integers, 2: real)
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param weight: vector representing w in our formula to generate the response.
:param class_method: string tuple, optional, describing how we derive the final response from the class
probabilities generated for binomial and multinomial family-type for training/validation/test data set respectively.
If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set
to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in the margin. If the maximum class probability fails
to be greater by the margin than the second highest class probability, the data sample is discarded.
:param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability in order for us to keep the data sample for
training/validation/test data set respectively. This field is only meaningful if class_method is set to
'threshold'
:return: None
"""
if data_type == 1: # generate random integers
x_mat = np.random.random_integers(min_p_value, max_p_value, [row_count, col_count])
elif data_type == 2: # generate random real numbers
x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count])
else:
print("dataType must be 1 or 2 for now. ")
sys.exit(1)
# generate the response vector to the input predictors
response_y = generate_response_glm(weight, x_mat, noise_std, family_type,
class_method=class_method, class_margin=class_margin)
# for family_type = 'multinomial' or 'binomial', response_y can be -ve to indicate bad sample data.
# need to delete this data sample before proceeding
if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
if 'threshold' in class_method.lower():
if np.any(response_y < 0): # remove negative entries out of data set
(x_mat, response_y) = remove_negative_response(x_mat, response_y)
# write to file in csv format
np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",")
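# Illustrative-only sketch of the response formulas quoted in the docstrings above (this is
# NOT the module's generate_response_glm; x_mat, weight_vec, bias and noise_std are stand-ins).
def _example_glm_response_formula(x_mat, weight_vec, bias, noise_std):
    """Gaussian response y = w^T x + b + e and the binomial class-1 probability
    exp(w^T x + e) / (1 + exp(w^T x + e))."""
    e = np.random.normal(0.0, noise_std, (x_mat.shape[0], 1))
    y_gaussian = np.dot(x_mat, weight_vec) + bias + e
    z = np.dot(x_mat, weight_vec) + e
    prob_class_one = np.exp(z) / (1.0 + np.exp(z))
    return y_gaussian, prob_class_one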
def generate_clusters(cluster_center_list, cluster_pt_number_list, cluster_radius_list):
"""
This function is used to generate clusters of points around cluster_centers listed in
    cluster_center_list. The number of points in each cluster is specified by cluster_pt_number_list.
    The size (radius) of each cluster can differ and is specified in cluster_radius_list.
:param cluster_center_list: list of coordinates of cluster centers
:param cluster_pt_number_list: number of points to generate for each cluster center
:param cluster_radius_list: list of size of each cluster
:return: list of sample points that belong to various clusters
"""
k = len(cluster_pt_number_list) # number of clusters to generate clusters for
if (not(k == len(cluster_center_list))) or (not(k == len(cluster_radius_list))):
print("Length of list cluster_center_list, cluster_pt_number_list, cluster_radius_list must be the same!")
sys.exit(1)
training_sets = []
for k_ind in range(k):
new_cluster_data = generate_one_cluster(cluster_center_list[k_ind], cluster_pt_number_list[k_ind],
cluster_radius_list[k_ind])
if k_ind > 0:
training_sets = np.concatenate((training_sets, new_cluster_data), axis=0)
else:
training_sets = new_cluster_data
# want to shuffle the data samples so that the clusters are all mixed up
    np.random.shuffle(training_sets)
return training_sets
def generate_one_cluster(cluster_center, cluster_number, cluster_size):
"""
    This function will generate a full cluster with cluster_number points centered on cluster_center
with maximum radius cluster_size
:param cluster_center: python list denoting coordinates of cluster center
:param cluster_number: integer denoting number of points to generate for this cluster
:param cluster_size: float denoting radius of cluster
:return: np matrix denoting a cluster
"""
pt_dists = np.random.uniform(0, cluster_size, [cluster_number, 1])
coord_pts = len(cluster_center) # dimension of each cluster point
one_cluster_data = np.zeros((cluster_number, coord_pts), dtype=np.float)
for p_ind in range(cluster_number):
coord_indices = list(range(coord_pts))
random.shuffle(coord_indices) # randomly determine which coordinate to generate
left_radius = pt_dists[p_ind]
for c_ind in range(coord_pts):
coord_index = coord_indices[c_ind]
one_cluster_data[p_ind, coord_index] = random.uniform(-1*left_radius+cluster_center[coord_index],
left_radius+cluster_center[coord_index])
left_radius = math.sqrt(pow(left_radius, 2)-pow((one_cluster_data[p_ind, coord_index]-
cluster_center[coord_index]), 2))
return one_cluster_data
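# Hedged usage sketch for the cluster generators above (illustrative addition, never called by
# the tests; the centers, point counts and radii are arbitrary).
def _example_generate_clusters_usage():
    """Build three 2-D clusters of different sizes and return the stacked sample matrix."""
    centers = [[0.0, 0.0], [5.0, 5.0], [-5.0, 5.0]]
    point_counts = [100, 50, 75]
    radii = [1.0, 0.5, 2.0]
    return generate_clusters(centers, point_counts, radii)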
def remove_negative_response(x_mat, response_y):
"""
Recall that when the user chooses to generate a data set for multinomial or binomial using the 'threshold' method,
response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in margin. If the maximum class probability fails
to be greater by margin than the second highest class probability, the data sample is discarded. However, when we
generate the data set, we keep all samples. For data sample with maximum class probability that fails to be
greater by margin than the second highest class probability, the response is set to be -1. This function will
remove all data samples (predictors and responses) with response set to -1.
:param x_mat: predictor matrix containing all predictor values
:param response_y: response that can be negative if that data sample is to be removed
:return: tuple containing x_mat, response_y with negative data samples removed.
"""
y_response_negative = np.where(response_y < 0) # matrix of True or False
x_mat = np.delete(x_mat,y_response_negative[0].transpose(),axis=0) # remove predictor row with negative response
# remove rows with negative response
response_y = response_y[response_y >= 0]
return x_mat,response_y.transpose()
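# Tiny illustration of the -1 convention handled above (illustrative addition, never called by
# the tests): the middle sample carries a negative response and is dropped from both arrays.
def _example_remove_negative_response_usage():
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    y = np.array([[1.0], [-1.0], [0.0]])
    return remove_negative_response(x, y)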
def generate_training_set_mixed_glm(csv_filename, csv_filename_true_one_hot, row_count, col_count, min_p_value,
max_p_value, family_type, noise_std, weight, enum_col, enum_level_vec,
class_number=2, class_method='probability', class_margin=0.0):
"""
Generate supervised data set given weights for the GLM algo with mixed categorical and real value
predictors. First randomly generate the predictors, then call function generate_response_glm to generate the
corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise
added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to
be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between
the response Y (K possible classes) and predictor vector X is assumed to be
Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e)) e is the random Gaussian noise added to the
response. The predictors and responses are saved in a file specified by csv_filename.
:param csv_filename: string representing full path filename to store supervised data set
:param csv_filename_true_one_hot: string representing full path filename to store data set with true one-hot
encoding.
:param row_count: integer representing the number of training samples in the data set
:param col_count: integer representing the number of predictors in the data set
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param weight: vector representing w in our formula to generate the response.
:param enum_col: integer representing actual number of categorical columns in data set
:param enum_level_vec: vector containing maximum integer value for each categorical column
    :param class_number: integer, optional, representing the number of classes for binomial and multinomial
:param class_method: string, optional, describing how we derive the final response from the class probabilities
generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly
according to the class probabilities calculated. If set to 'threshold', response y is set to the class with
the maximum class probability if the maximum class probability exceeds the second highest class probability by
the value set in margin. If the maximum class probability fails to be greater by margin than the second highest
class probability, the data sample is discarded.
:param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability by in order for us to keep the data set sample. This field is only
meaningful if class_method is set to 'threshold'
:return: None
"""
# generate the random training data sets
enum_dataset = np.zeros((row_count, enum_col), dtype=np.int) # generate the categorical predictors
# generate categorical data columns
for indc in range(enum_col):
enum_dataset[:, indc] = np.random.random_integers(0, enum_level_vec[indc], row_count)
# generate real data columns
x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count-enum_col])
x_mat = np.concatenate((enum_dataset, x_mat), axis=1) # concatenate categorical and real predictor columns
if len(csv_filename_true_one_hot) > 0:
generate_and_save_mixed_glm(csv_filename_true_one_hot, x_mat, enum_level_vec, enum_col, True, weight, noise_std,
family_type, class_method=class_method, class_margin=class_margin)
if len(csv_filename) > 0:
generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, False, weight, noise_std,
family_type, class_method=class_method, class_margin=class_margin)
def generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, true_one_hot, weight, noise_std,
family_type, class_method='probability', class_margin=0.0):
"""
Given the weights and input data matrix with mixed categorical and real value predictors, this function will
generate a supervised data set and save the input data and response in a csv format file specified by
csv_filename. It will first encode the enums using one-hot encoding, with or without a reference level,
before generating a response Y.
:param csv_filename: string representing full path filename to store the supervised data set (encoded with
either true one-hot encoding or reference level plus one-hot encoding, depending on true_one_hot).
:param x_mat: predictor matrix with mixed columns (categorical/real values)
:param enum_level_vec: vector containing maximum integer value for each categorical column
:param enum_col: integer representing actual number of categorical columns in data set
:param true_one_hot: bool indicating whether we are using true one hot encoding or reference level plus
one hot encoding
:param weight: vector representing w in our formula to generate the response
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string representing the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param class_method: string, optional, describing how we derive the final response from the class probabilities
generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly
according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the
maximum class probability if the maximum class probability exceeds the second highest class probability by the
value set in the margin. If the maximum class probability fails to be greater by margin than the second highest
class probability, the data sample is discarded.
:param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed
the second highest class probability in order for us to keep the data sample. This field is only meaningful if
class_method is set to 'threshold'
:return: None
"""
# encode the enums
x_mat_encoded = encode_enum_dataset(x_mat, enum_level_vec, enum_col, true_one_hot, False)
# extract the correct weight dimension for the data set
if not true_one_hot:
(num_row, num_col) = x_mat_encoded.shape
weight = weight[0:num_col+1] # +1 to take care of the intercept term
# generate the corresponding response vector given the weight and encoded input predictors
response_y = generate_response_glm(weight, x_mat_encoded, noise_std, family_type,
class_method=class_method, class_margin=class_margin)
# for family_type = 'multinomial' or 'binomial', response_y can be negative to flag a bad data sample.
# Those samples need to be removed before proceeding.
if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
if 'threshold' in class_method.lower():
(x_mat,response_y) = remove_negative_response(x_mat, response_y)
# write generated data set to file in csv format
np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",")
def encode_enum_dataset(dataset, enum_level_vec, enum_col, true_one_hot, include_nans):
"""
Given 2-d numpy array of predictors with categorical and real columns, this function will
encode the enum columns with 1-hot encoding or with reference plus one hot encoding
:param dataset: 2-d numpy array of predictors with both categorical and real columns
:param enum_level_vec: vector containing maximum level for each categorical column
:param enum_col: number of categorical columns in the data set
:param true_one_hot: bool indicating if we are using true one hot encoding or with one reference level + one hot
encoding
:param include_nans: bool indicating if we have nans in categorical columns
:return: data set with categorical columns encoded with 1-hot encoding or 1-hot encoding plus reference
"""
(num_row, num_col) = dataset.shape
# split the data set into categorical and real parts
enum_arrays = dataset[:, 0:enum_col]
new_enum_arrays = []
# perform the encoding for each element of categorical part
for indc in range(enum_col):
enum_col_num = enum_level_vec[indc]+1
if not true_one_hot:
enum_col_num -= 1
if include_nans and np.any(np.isnan(enum_arrays[:, indc])):    # reserve an extra column when nans are present
enum_col_num += 1
new_temp_enum = np.zeros((num_row, enum_col_num[0]))
one_hot_matrix = one_hot_encoding(enum_col_num)
last_col_index = enum_col_num-1
# encode each enum using 1-hot encoding or plus reference value
for indr in range(num_row):
enum_val = enum_arrays[indr, indc]
if true_one_hot:  # using true one-hot encoding
new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 0, last_col_index)
else:
if enum_val:
new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 1, last_col_index)
if indc == 0:
new_enum_arrays = new_temp_enum
else:
new_enum_arrays = np.concatenate((new_enum_arrays, new_temp_enum), axis=1)
return np.concatenate((new_enum_arrays, dataset[:, enum_col:num_col]), axis=1)
def replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index):
"""
Generate encoded bits for a categorical data value using one hot encoding.
:param one_hot_matrix: matrix representing the encoding of categorical data value to 1-hot encoding
:param enum_val: categorical data value, could be np.nan
:param add_value: set to 1 if a reference value is needed in addition to 1-hot encoding
:param last_col_index: index into encoding for np.nan if exists
:return: vector representing the encoded values for a enum value
"""
if np.isnan(enum_val): # if data value is np.nan
return one_hot_matrix[last_col_index]
else:
return one_hot_matrix[int(enum_val-add_value)]
def one_hot_encoding(enum_level):
"""
Generate the one_hot_encoding matrix given the number of enum_level.
:param enum_level: integer representing the number of levels to encode (size of the encoding matrix)
:return: numpy array for the enum_level specified. Note, enum_level <= 6
"""
if enum_level >= 2:
base_array = np.array([[0, 1], [1, 0]]) # for 2 enum levels
for enum_index in range(3, enum_level+1): # loop to build encoding for enum levels > 2
(num_row, num_col) = base_array.shape
col_zeros = np.asmatrix(np.zeros(num_row)).transpose() # column of zero matrix
base_array = np.concatenate((col_zeros, base_array), axis=1) # add column of zero
row_zeros = np.asmatrix(np.zeros(num_row+1)) # add row of zeros
row_zeros[0, 0] = 1 # set first element to 1
base_array = np.concatenate((base_array, row_zeros), axis=0)
return base_array
else:
print ("enum_level must be >= 2.")
sys.exit(1)
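# A minimal sketch (illustrative only, not part of the original utilities): one_hot_encoding(3)
# builds a 3 x 3 matrix with exactly one 1 per row; row k is later looked up by
# replace_with_encoded_bits as the encoded bits for enum value k.
def _example_one_hot_encoding():
    encoding = one_hot_encoding(3)
    # expected rows (as floats): [0, 0, 1], [0, 1, 0], [1, 0, 0]
    return encoding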
def generate_response_glm(weight, x_mat, noise_std, family_type, class_method='probability',
class_margin=0.0):
"""
Generate response vector given weight matrix, predictors matrix for the GLM algo.
:param weight: vector representing w in our formula to generate the response
:param x_mat: random numpy matrix (2-D ndarray) containing the predictors
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string representing the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
:param class_method: string, optional, describing how we derive the final response from the class probabilities
generated for binomial and multinomial familyType. If set to 'probability', response y is generated randomly
according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the
maximum class probability if the maximum class probability exceeds the second highest class probability by the
value set in the margin. If the maximum class probability fails to be greater by margin than the second highest
class probability, the data sample is discarded.
:param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed
the second highest class probability in order for us to keep the data set sample. This field is only meaningful if
class_method is set to 'threshold'
:return: vector representing the response
"""
(num_row, num_col) = x_mat.shape
# add a column of 1's to x_mat
temp_ones_col = np.asmatrix(np.ones(num_row)).transpose()
x_mat = np.concatenate((temp_ones_col, x_mat), axis=1)
# generate response given predictor and weight and add noise vector, default behavior
response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])
# added more to form Multinomial response
if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
temp_mat = np.exp(response_y) # matrix of n by K where K = 1 for binomials
if 'binomial' in family_type.lower():
ntemp_mat = temp_mat + 1
btemp_mat = temp_mat / ntemp_mat
temp_mat = np.concatenate((1-btemp_mat, btemp_mat), axis=1) # inflate temp_mat to 2 classes
response_y = derive_discrete_response(temp_mat, class_method, class_margin)
return response_y
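# A minimal sketch (illustrative only, not part of the original utilities): generate a Gaussian
# response for a tiny hand-made predictor matrix using y = w^T x + b + e, where the first entry
# of the weight vector plays the role of the intercept b.
def _example_generate_response_gaussian():
    x_mat = np.asmatrix([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])   # 3 samples, 2 predictors
    weight = np.asmatrix([[0.5], [1.0], [-2.0]])                # intercept + 2 weights
    response_y = generate_response_glm(weight, x_mat, 0.1, 'gaussian')
    return response_y    # 3 x 1 column of noisy responses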
def derive_discrete_response(prob_mat, class_method, class_margin):
"""
This function is written to generate the final class response given the probabilities (Prob(y=k)). There are
two methods that we use and is specified by the class_method. If class_method is set to 'probability',
response y is generated randomly according to the class probabilities calculated. If set to 'threshold',
response y is set to the class with the maximum class probability if the maximum class probability exceeds the
second highest class probability by the value set in margin. If the maximum class probability fails to be
greater by margin than the second highest class probability, the data sample will be discarded later by
marking the final response as -1.
:param prob_mat: probability matrix specifying the probability that y=k where k is a class
:param class_method: string set to 'probability' or 'threshold'
:param class_margin: if class_method='threshold', class_margin is the margin used to determine if a response is to
be kept or discarded.
:return: response vector representing the class of y, or -1 if a data sample is to be discarded.
"""
(num_sample, num_class) = prob_mat.shape
prob_mat = normalize_matrix(prob_mat)
discrete_y = np.zeros((num_sample, 1), dtype=np.int)
if 'probability' in class_method.lower():
prob_mat = np.cumsum(prob_mat, axis=1)
random_v = np.random.uniform(0, 1, [num_sample, 1])
# choose the class that final response y belongs to according to the
# probability prob(y=k)
class_bool = random_v < prob_mat
for indR in range(num_sample):
for indC in range(num_class):
if class_bool[indR, indC]:
discrete_y[indR, 0] = indC
break
elif 'threshold' in class_method.lower():
discrete_y = np.argmax(prob_mat, axis=1)
temp_mat = np.diff(np.sort(prob_mat, axis=1), axis=1)
# check if max value exceeds second one by at least margin
mat_diff = temp_mat[:, num_class-2]
mat_bool = mat_diff < class_margin
discrete_y[mat_bool] = -1
else:
print('class_method should be set to "probability" or "threshold" only!')
sys.exit(1)
return discrete_y
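# A minimal sketch (illustrative only, not part of the original utilities): with
# class_method='threshold' and a margin of 0.2, the second sample below is flagged with -1
# because its two largest class probabilities differ by only 0.1.
def _example_derive_discrete_response_threshold():
    prob_mat = np.asmatrix([[0.7, 0.2, 0.1],
                            [0.4, 0.3, 0.3]])
    discrete_y = derive_discrete_response(prob_mat, 'threshold', 0.2)
    # expected: first sample -> class 0 (0.7 - 0.2 >= 0.2), second sample -> -1 (0.4 - 0.3 < 0.2)
    return discrete_y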
def normalize_matrix(mat):
"""
This function will normalize a matrix across each row such that the row sum is 1.
:param mat: matrix containing prob(y=k)
:return: normalized matrix containing prob(y=k)
"""
(n, K) = mat.shape
kronmat = np.ones((1, K), dtype=float)
row_sum = np.sum(mat, axis=1)
row_sum_mat = np.kron(row_sum, kronmat)
return mat/row_sum_mat
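# A minimal sketch (illustrative only, not part of the original utilities): after normalization,
# every row of the returned matrix sums to 1.
def _example_normalize_matrix():
    mat = np.asmatrix([[1.0, 3.0],
                       [2.0, 2.0]])
    # expected: [[0.25, 0.75], [0.5, 0.5]]
    return normalize_matrix(mat)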
def move_files(dir_path, old_name, new_file, action='move'):
"""
Simple function to move or copy a data set (old_name) to a special directory (dir_path)
under a new name (new_file) so that we will be able to re-run the tests if we
have found something wrong with the algorithm under test on that data set.
This is done to avoid losing the data set.
:param dir_path: string representing full directory path where a file is to be moved to
:param old_name: string representing file (filename with full directory path) to be moved to new directory.
:param new_file: string representing the file name of the moved/copied file in the new directory
:param action: string, optional, represent the action 'move' or 'copy' file
:return: None
"""
new_name = os.path.join(dir_path, new_file) # generate new filename including directory path
if os.path.isfile(old_name): # only move/copy file old_name if it actually exists
if 'move' in action:
motion = 'mv '
elif 'copy' in action:
motion = 'cp '
else:
print("Illegal action setting. It can only be 'move' or 'copy'!")
sys.exit(1)
cmd = motion+old_name+' '+new_name # generate cmd line string to move the file
subprocess.call(cmd, shell=True)
def remove_files(filename):
"""
Simple function to remove data set saved in filename if the dynamic test is completed with no
error. Some data sets we use can be rather big. This is performed to save space.
:param filename: string representing the file to be removed. Full path is included.
:return: None
"""
cmd = 'rm ' + filename
subprocess.call(cmd, shell=True)
def random_col_duplication(num_cols, duplication_threshold, max_number, to_scale, max_scale_factor):
"""
This function will randomly determine, for each column, whether it should be duplicated and,
if so, how many times it should be duplicated. In addition, a
scaling factor will be randomly applied to each duplicated column if enabled.
:param num_cols: integer representing number of predictors used
:param duplication_threshold: threshold to determine if a column is to be duplicated. Set
this number to be low if you want to encourage column duplication and vice versa
:param max_number: maximum number of times a column is to be duplicated
:param to_scale: bool indicating if a duplicated column is to be scaled
:param max_scale_factor: real representing maximum scale value for repeated columns
:return: a tuple containing two vectors: col_return, col_scale_return.
col_return: vector indicating the column indices of the original data matrix that will be included
in the new data matrix with duplicated columns
col_scale_return: vector indicating for each new column in the new data matrix with duplicated columns,
what scale should be applied to that column.
"""
col_indices = list(range(num_cols)) # contains column indices of predictors in original data set
col_scales = [1]*num_cols # scaling factor for original data set, all ones.
for ind in range(num_cols): # determine for each column if to duplicate it
temp = random.uniform(0, 1) # generate random number from 0 to 1
if temp > duplication_threshold: # duplicate column if random number generated exceeds duplication_threshold
rep_num = random.randint(1, max_number) # randomly determine how many times to repeat a column
more_col_indices = [ind]*rep_num
col_indices.extend(more_col_indices)
temp_scale = []
for ind in range(rep_num):
if to_scale: # for each duplicated column, determine a scaling factor to multiply the column with
temp_scale.append(random.uniform(0, max_scale_factor))
else:
temp_scale.append(1)
col_scales.extend(temp_scale)
# randomly shuffle the predictor column orders and the corresponding scaling factors
new_col_indices = list(range(len(col_indices)))
random.shuffle(new_col_indices)
col_return = [col_indices[i] for i in new_col_indices]
col_scale_return = [col_scales[i] for i in new_col_indices]
return col_return, col_scale_return
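# A minimal sketch (illustrative only, not part of the original utilities): duplicate and scale
# columns of a 3-predictor data set. The output is random, so the values below are only one
# possible outcome.
def _example_random_col_duplication():
    col_indices, col_scales = random_col_duplication(num_cols=3, duplication_threshold=0.5,
                                                     max_number=2, to_scale=True,
                                                     max_scale_factor=5.0)
    # col_indices might look like [2, 0, 1, 0, 2]: columns 0 and 2 were each duplicated once;
    # col_scales holds one multiplier per entry of col_indices (1 for the original columns).
    return col_indices, col_scales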
def duplicate_scale_cols(col_indices, col_scale, old_filename, new_filename):
"""
This function actually performs the column duplication with scaling, given the column
indices and scaling factors for each column. It will first load the original data set
from old_filename. After performing column duplication and scaling, the new data set
will be written to file with new_filename.
:param col_indices: vector indicating the column indices of the original data matrix that will be included
in the new data matrix with duplicated columns
:param col_scale: vector indicating for each new column in the new data matrix with duplicated columns,
what scale should be applied to that column
:param old_filename: string representing full directory path and filename where data set is stored
:param new_filename: string representing full directory path and filename where new data set is to be stored
:return: None
"""
# pd_frame = pd.read_csv(old_filename, header=None) # read in original data set
#
# pd_frame_new = pd.DataFrame() # new empty data frame
#
# for ind in range(len(col_indices)): # for each column
# tempc = pd_frame.ix[:, col_indices[ind]]*col_scale[ind] # extract a column from old data frame and scale it
# pd_frame_new = pd.concat([pd_frame_new, tempc], axis=1) # add it to the new data frame
np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))
(num_row, num_col) = np_frame.shape
np_frame_new = np.asmatrix(np.zeros((num_row, len(col_indices)), dtype=np.float))
for ind in range(len(col_indices)):
np_frame_new[:, ind] = np_frame[:, col_indices[ind]]*col_scale[ind]
# done changing the data frame. Save it in a new file
np.savetxt(new_filename, np_frame_new, delimiter=",")
def insert_nan_in_data(old_filename, new_filename, missing_fraction):
"""
Given the filename of a data set stored in old_filename, this function will randomly determine,
for each predictor value, whether to replace it with nan with probability missing_fraction. The
new data set will be stored in filename new_filename.
:param old_filename: string representing full directory path and filename where data set is stored
:param new_filename: string representing full directory path and filename where new data set with missing
values is to be stored
:param missing_fraction: real value representing the probability of replacing a predictor with nan.
:return: None
"""
# pd_frame = pd.read_csv(old_filename, header=None) # read in a dataset
np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))
(row_count, col_count) = np_frame.shape
random_matrix = np.random.uniform(0, 1, [row_count, col_count-1])
for indr in range(row_count): # for each predictor value, determine if to replace value with nan
for indc in range(col_count-1):
if random_matrix[indr, indc] < missing_fraction:
np_frame[indr, indc] = np.nan
# save new data set with missing values to new file
np.savetxt(new_filename, np_frame, delimiter=",")
# pd_frame.to_csv(new_filename, sep=',', header=False, index=False, na_rep='nan')
def print_message_values(start_string, nump_array):
"""
This function prints the value of a nump_array with a string message in front of it.
:param start_string: string representing message to be printed
:param nump_array: numpy array whose contents are to be printed
:return: None
"""
print(start_string)
print(nump_array)
def show_test_results(test_name, curr_test_val, new_test_val):
"""
This function prints the test execution results which can be passed or failed. A message will be printed on
screen to warn user of the test result.
:param test_name: string representing test name
:param curr_test_val: integer representing number of tests failed so far before the test specified in test_name
is executed
:param new_test_val: integer representing number of tests failed after the test specified in test_name is
executed
:return: integer: 0 if the test passed and 1 if the test failed.
"""
failed_string = "Ooops, " + test_name + " failed. I am sorry..."
pass_string = "Yeah, " + test_name + " passed!"
if (curr_test_val < new_test_val): # this test has failed
print(failed_string)
return 1
else:
print(pass_string)
return 0
def equal_two_arrays(array1, array2, eps, tolerance):
"""
This function will compare the values of two python tuples. First, if both values are below
eps, which denotes the significance level that we care about, no comparison is performed. Next,
False is returned if the difference between any pair of elements of the two arrays exceeds some tolerance.
:param array1: numpy array containing some values of interest
:param array2: numpy array containing some values of interest that we would like to compare it with array1
:param eps: significance level that we care about in order to perform the comparison
:param tolerance: threshold for which we allow the two array elements to be different by
:return: True if elements in array1 and array2 are close and False otherwise
"""
size1 = len(array1)
if size1 == len(array2): # arrays must be the same size
# compare two arrays
for ind in range(size1):
if not ((array1[ind] < eps) and (array2[ind] < eps)):
# values to be compared are not too small, perform comparison
# look at differences between elements of array1 and array2
compare_val_h2o_Py = abs(array1[ind] - array2[ind])
if compare_val_h2o_Py > tolerance: # difference is too high, return false
return False
return True # return True, elements of two arrays are close enough
else:
print("The two arrays are of different size!")
sys.exit(1)
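# A minimal sketch (illustrative only, not part of the original utilities): the third pair below
# is skipped because both values fall under eps; the remaining pairs differ by 0.001, which is
# within the 0.01 tolerance, so the arrays are considered equal.
def _example_equal_two_arrays():
    array1 = [1.000, 2.000, 1e-14]
    array2 = [1.001, 2.001, 5e-13]
    return equal_two_arrays(array1, array2, eps=1e-10, tolerance=1e-2)   # -> True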
def compare_two_arrays(array1, array2, eps, tolerance, comparison_string, array1_string, array2_string, error_string,
success_string, template_is_better, just_print=False):
"""
This function is written to print out the performance comparison results for various values that
we care about. It will return 1 if the difference between the values of the two arrays exceeds the threshold
specified in tolerance. The actual comparison is performed by calling function equal_two_arrays.
:param array1: numpy array containing some values of interest
:param array2: numpy array containing some values of interest that we would like to compare it with array1
:param eps: significance level that we care about in order to perform the comparison
:param tolerance: threshold for which we allow the two array elements to be different by
:param comparison_string: string stating what the comparison is about, e.g. "Comparing p-values ...."
:param array1_string: string stating what is the array1 attribute of interest, e.g. "H2O p-values: "
:param array2_string: string stating what is the array2 attribute of interest, e.g. "Theoretical p-values: "
:param error_string: string stating what you want to say if the difference between array1 and array2
exceeds tolerance, e.g "P-values are not equal!"
:param success_string: string stating what you want to say if the difference between array1 and array2 does not
exceed tolerance "P-values are close enough!"
:param template_is_better: bool, True, will return 1 if difference among elements of array1 and array2 exceeds
tolerance. False, will always return 0 even if difference among elements of array1 and array2 exceeds tolerance.
In this case, the system under test actually performs better than the template.
:param just_print: bool if True will print attribute values without doing comparison. False will print
attribute values and perform comparison
:return: if template_is_better = True, return 0 if elements in array1 and array2 are close and 1 otherwise;
if template_is_better = False, will always return 0 since system under tests performs better than
template system.
"""
# display array1, array2 with proper description
print(comparison_string)
print(array1_string, array1)
print(array2_string, array2)
if just_print: # just print the two values and do no comparison
return 0
else: # may need to actually perform comparison
if template_is_better:
try:
assert equal_two_arrays(array1, array2, eps, tolerance), error_string
print(success_string)
sys.stdout.flush()
return 0
except:
sys.stdout.flush()
return 1
else:
print("Test result is actually better than comparison template!")
return 0
def make_Rsandbox_dir(base_dir, test_name, make_dir):
"""
This function will remove directory "Rsandbox/test_name" off directory base_dir and contents if it exists.
If make_dir is True, it will create a clean directory "Rsandbox/test_name" off directory base_dir.
:param base_dir: string contains directory path where we want to build our Rsandbox/test_name off from
:param test_name: string contains unit test name that the Rsandbox is created for
:param make_dir: bool, True: will create directory baseDir/Rsandbox/test_name, False: will not create
directory.
:return: syndatasets_dir: string containing the full path of the directory name specified by base_dir, test_name
"""
# create the Rsandbox directory path for the test.
syndatasets_dir = os.path.join(base_dir, "Rsandbox_" + test_name)
if os.path.exists(syndatasets_dir): # remove Rsandbox directory if it exists
shutil.rmtree(syndatasets_dir)
if make_dir: # create Rsandbox directory if make_dir is True
os.makedirs(syndatasets_dir)
return syndatasets_dir
def get_train_glm_params(model, what_param, family_type='gaussian'):
"""
This function will grab the various attributes (like coefficients, p-values, and others) off a GLM
model that has been built.
:param model: GLM model that we want to extract information from
:param what_param: string indicating the model attribute of interest like 'p-value','weights',...
:param family_type: string, optional, representing the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
:return: attribute value of interest
"""
coeff_pvalues = model._model_json["output"]["coefficients_table"].cell_values
if what_param == 'p-values':
if 'gaussian' in family_type.lower():
p_value_h2o = []
for ind in range(len(coeff_pvalues)):
p_value_h2o.append(coeff_pvalues[ind][-1])
return p_value_h2o
else:
print("P-values are only available to Gaussian family.")
sys.exit(1)
elif what_param == 'weights':
if 'gaussian' in family_type.lower():
weights = []
for ind in range(len(coeff_pvalues)):
weights.append(coeff_pvalues[ind][1])
return weights
elif ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
# for multinomial, the coefficients are organized as features by number of classes for
# nonstandardized and then standardized weights. Need to grab the correct matrix as
# number of classes by n_features matrix
num_feature = len(coeff_pvalues)
num_class = (len(coeff_pvalues[0])-1)//2    # integer division so num_class stays an int under Python 3
coeffs = np.zeros((num_class,num_feature), dtype=np.float)
end_index = int(num_class+1)
for col_index in range(len(coeff_pvalues)):
coeffs[:, col_index] = coeff_pvalues[col_index][1:end_index]
return coeffs
elif what_param == 'best_lambda':
lambda_str = model._model_json["output"]["model_summary"].cell_values[0][4].split('=')
return float(str(lambda_str[-2]).split(',')[0])
elif what_param == 'confusion_matrix':
if 'multinomial' in family_type.lower():
return model._model_json["output"]["training_metrics"]._metric_json["cm"]["table"]
elif 'binomial' in family_type.lower():
return model.confusion_matrix().table
else:
print("parameter value not found in GLM model")
sys.exit(1)
def less_than(val1, val2):
"""
Simple function that returns True if val1 <= val2 and False otherwise.
:param val1: first value of interest
:param val2: second value of interest
:return: bool: True if val1 <= val2 and False otherwise
"""
if round(val1, 3) <= round(val2, 3): # only care to the 3rd position after decimal point
return True
else:
return False
def replace_nan_with_mean(data_with_nans, nans_row_col_indices, col_means):
"""
Given a data set with nans, row and column indices of where the nans are and the col_means, this
function will replace the nans with the corresponding col_means.
:param data_with_nans: data set matrix with nans
:param nans_row_col_indices: matrix containing the row and column indices of where the nans are
:param col_means: vector containing the column means of data_with_NAs
:return: data_with_NAs: data set with nans replaced with column means
"""
num_NAs = len(nans_row_col_indices[0])
for ind in range(num_NAs):
data_with_nans[nans_row_col_indices[0][ind], nans_row_col_indices[1][ind]] = \
col_means[nans_row_col_indices[1][ind]]
return data_with_nans
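# A minimal sketch (illustrative only, not part of the original utilities): impute the single nan
# in column 1 with that column's mean computed via np.nanmean.
def _example_replace_nan_with_mean():
    data = np.array([[1.0, 2.0],
                     [3.0, np.nan],
                     [5.0, 6.0]])
    nan_indices = np.where(np.isnan(data))     # (row indices, column indices) of the nans
    col_means = np.nanmean(data, axis=0)       # column means ignoring the nans
    return replace_nan_with_mean(data, nan_indices, col_means)   # the nan becomes 4.0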
def remove_csv_files(dir_path, suffix=".csv", action='remove', new_dir_path=""):
"""
Given a directory, this function will gather all files ending with the string specified
in suffix. Next, it is going to delete those files if action is set to 'remove'. If
action is set to 'copy', a new_dir_path must be specified and the files ending with suffix
will be copied to this new directory instead.
:param dir_path: string representing full path to directory of interest
:param suffix: string representing suffix of filename that are to be found and deleted
:param action: string, optional, denoting the action to perform on files, 'remove' or 'copy'
:param new_dir_path: string, optional, representing full path to new directory
:return: None
"""
filenames = os.listdir(dir_path) # list all files in directory
# only collect files with filename ending with suffix
to_remove = [filename for filename in filenames if filename.endswith(suffix)]
# delete files ending with suffix
for fn in to_remove:
temp_fn = os.path.join(dir_path, fn)
# only remove if file actually exists.
if os.path.isfile(temp_fn):
if 'remove' in action:
remove_files(temp_fn)
elif 'copy' in action:
move_files(new_dir_path, temp_fn, fn, action=action)
else:
print("action string can only be 'remove' or 'copy.")
sys.exit(1)
def extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, end_test_str, want_p_values,
attr1_bool, attr2_bool, att1_template, att2_template, att3_template,
att4_template, compare_att1_str, h2o_att1_str, template_att1_str,
att1_str_fail, att1_str_success, compare_att2_str, h2o_att2_str,
template_att2_str, att2_str_fail, att2_str_success, compare_att3_str,
h2o_att3_str, template_att3_str, att3_str_fail, att3_str_success,
compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
att4_str_success, failed_test_number, ignored_eps, allowed_diff,
noise_var, template_must_be_better, attr3_bool=True, attr4_bool=True):
"""
This function basically will compare four attributes (weight, p-values, training data MSE, test data MSE) of a test
with a template model. If the difference of comparison exceeds a certain threshold, the test will be determined as
failed and vice versa. There are times when we do not care about p-values and/or weight comparisons but mainly
concerned with MSEs. We can set the input parameters to indicate if this is the case.
:param model_h2o: H2O model that we want to evaluate
:param h2o_model_test_metrics: test performance of H2O model under evaluation
:param end_test_str: string representing end test banner to be printed
:param want_p_values: bool True if we want to care about p-values and False if we don't
:param attr1_bool: bool True if we want to compare weight difference between H2O model and template model
and False otherwise.
:param attr2_bool: bool True if we want to compare p-value difference between H2O model and template model
and False otherwise.
:param att1_template: value of first template attribute, the weight vector
:param att2_template: value of second template attribute, the p-value vector
:param att3_template: value of third template attribute, the training data set MSE
:param att4_template: value of fourth template attribute, the test data set MSE
:param compare_att1_str: string describing the comparison of first attribute, e.g. "Comparing intercept and
weights ...."
:param h2o_att1_str: string describing H2O model first attribute values, e.g. "H2O intercept and weights: "
:param template_att1_str: string describing template first attribute values, e.g. "Theoretical intercept and
weights: "
:param att1_str_fail: string describing message to print out if difference exceeds threshold, e.g.
"Intercept and weights are not equal!"
:param att1_str_success: string describing message to print out if difference < threshold, e.g.
"Intercept and weights are close enough!"
:param compare_att2_str: string describing the comparison of first attribute, e.g. "Comparing p-values ...."
:param h2o_att2_str: string describing H2O model first attribute values, e.g. "H2O p-values: "
:param template_att2_str: string describing template first attribute values, e.g. "Theoretical p-values: "
:param att2_str_fail: string describing message to print out if difference exceeds threshold, e.g.
"P-values are not equal!"
:param att2_str_success: string describing message to print out if difference < threshold, e.g.
"P-values are close enough!"
:param compare_att3_str: string describing the comparison of first attribute, e.g. "Comparing training MSEs ...."
:param h2o_att3_str: string describing H2O model first attribute values, e.g. "H2O training MSE: "
:param template_att3_str: string describing template first attribute values, e.g. "Theoretical train MSE: "
:param att3_str_fail: string describing message to print out if difference exceeds threshold, e.g.
"Training MSEs are not equal!"
:param att3_str_success: string describing message to print out if difference < threshold, e.g.
"Training MSEs are close enough!"
:param compare_att4_str: string describing the comparison of first attribute, e.g. "Comparing test MSEs ...."
:param h2o_att4_str: string describing H2O model first attribute values, e.g. "H2O test MSE: "
:param template_att4_str: string describing template first attribute values, e.g. "Theoretical test MSE: "
:param att4_str_fail: string describing message to print out if difference exceeds threshold, e.g.
"Test MSEs are not equal!"
:param att4_str_success: string describing message to print out if difference < threshold, e.g.
"Test MSEs are close enough!"
:param failed_test_number: integer denoting the number of tests failed so far
:param ignored_eps: if a value is smaller than ignored_eps, no comparison is performed
:param allowed_diff: threshold which, if exceeded, will fail a test
:param noise_var: Gaussian noise variance used to generate data set
:param template_must_be_better: bool: True: template value must be lower, False: don't care
:param attr3_bool: bool denoting if we should compare attribute 3 values
:param attr4_bool: bool denoting if we should compare attribute 4 values
:return: a tuple containing test h2o model training and test performance metrics that include: weight, pValues,
mse_train, r2_train, mse_test, r2_test
"""
# grab weight from h2o model
test1_weight = get_train_glm_params(model_h2o, 'weights')
# grab p-values from h2o model
test1_p_values = []
if want_p_values:
test1_p_values = get_train_glm_params(model_h2o, 'p-values')
# grab other performance metrics
test1_mse_train = model_h2o.mse()
test1_r2_train = model_h2o.r2()
test1_mse_test = h2o_model_test_metrics.mse()
test1_r2_test = h2o_model_test_metrics.r2()
# compare performances of template and h2o model weights
failed_test_number += compare_two_arrays(test1_weight, att1_template, ignored_eps, allowed_diff*100, compare_att1_str,
h2o_att1_str, template_att1_str, att1_str_fail, att1_str_success,
attr1_bool)
# p-values
if want_p_values:
if np.isnan(np.asarray(test1_p_values)).any(): # p-values contain nan
failed_test_number += 1
failed_test_number += compare_two_arrays(test1_p_values, att2_template, ignored_eps, allowed_diff,
compare_att2_str, h2o_att2_str, template_att2_str, att2_str_fail,
att2_str_success, attr2_bool)
# Training MSE
need_to_compare = less_than(att3_template, test1_mse_train)
# in some cases, template value should always be better. Training data MSE should always
# be better without regularization than with regularization
if (not need_to_compare) and template_must_be_better:
failed_test_number += 1
failed_test_number += compare_two_arrays([test1_mse_train], [att3_template], ignored_eps, noise_var,
compare_att3_str, h2o_att3_str,
template_att3_str, att3_str_fail, att3_str_success, attr3_bool)
# Test MSE
need_to_compare = less_than(att4_template, test1_mse_test)
failed_test_number += compare_two_arrays([test1_mse_test], [att4_template], ignored_eps, noise_var,
compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
att4_str_success, need_to_compare, attr4_bool)
# print end test banner
print(end_test_str)
print("*******************************************************************************************")
sys.stdout.flush()
return test1_weight, test1_p_values, test1_mse_train, test1_r2_train, test1_mse_test,\
test1_r2_test, failed_test_number
def extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics, family_type, end_test_str,
compare_att_str=["", "", "", "", "", "", ""],
h2o_att_str=["", "", "", "", "", "", ""],
template_att_str=["", "", "", "", "", "", ""],
att_str_fail=["", "", "", "", "", "", ""],
att_str_success=["", "", "", "", "", "", ""],
test_model=None, test_model_metric=None, template_params=None,
can_be_better_than_template=[
False, False, False, False, False, False],
just_print=[True, True, True, True, True, True],
ignored_eps=1e-15, allowed_diff=1e-5, failed_test_number=0):
"""
This function basically will compare and print out six performance metrics of a test with a
template model. If the difference of comparison exceeds a certain threshold, the test will be determined as
failed and vice versa. There are times when we do not care about comparisons but mainly concerned with
logloss/prediction accuracy in determining if a test shall fail. We can set the input parameters to indicate
if this is the case.
:param model_h2o: H2O model that we want to evaluate
:param h2o_model_test_metrics: test performance of H2O model under evaluation
:param family_type: string representing the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
:param end_test_str: string to be printed at the end of a test
:param compare_att_str: array of strings describing what we are trying to compare
:param h2o_att_str: array of strings describing each H2O attribute of interest
:param template_att_str: array of strings describing template attribute of interest
:param att_str_fail: array of strings to be printed if the comparison failed
:param att_str_success: array of strings to be printed if comparison succeeded
:param test_model: template model whose attributes we want to compare our H2O model with
:param test_model_metric: performance on test data set of template model
:param template_params: array containing template attribute values that we want to compare our H2O model with
:param can_be_better_than_template: array of bool: True: template value must be lower, False: don't care
:param just_print: array of bool for each attribute if True, no comparison is performed, just print the attributes
and if False, will compare the attributes and print the attributes as well
:param ignored_eps: if a value is smaller than ignored_eps, no comparison is performed
:param allowed_diff: threshold which, if exceeded, will fail a test
:param failed_test_number: integer denoting the number of tests failed so far
:return: accumulated number of tests that have failed so far
"""
# grab performance metrics from h2o model
(h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,
h2o_confusion_matrix_test, h2o_accuracy_test) = grab_model_params_metrics(model_h2o, h2o_model_test_metrics,
family_type)
# grab performance metrics from template model
if test_model and test_model_metric:
(template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,
template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = \
grab_model_params_metrics(test_model, test_model_metric, family_type)
elif template_params:
# grab template comparison values from somewhere else
(template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,
template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = template_params
else:
print("No valid template parameters are given for comparison.")
sys.exit(1)
# print and/or compare the weights between template and H2O
compare_index = 0
failed_test_number += compare_two_arrays(h2o_weight, template_weight, ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], True, just_print[compare_index])
compare_index += 1
# this is logloss from training data set,
if not(just_print[compare_index]) and not(can_be_better_than_template[compare_index]):
if (h2o_logloss_train < template_logloss_train) and \
(abs(h2o_logloss_train-template_logloss_train) > 1e-5):
# H2O performed better than template which is not allowed
failed_test_number += 1 # increment failed_test_number and just print the results
compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], True, True)
else:
failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,
allowed_diff, compare_att_str[compare_index],
h2o_att_str[compare_index], template_att_str[compare_index],
att_str_fail[compare_index], att_str_success[compare_index], True,
False)
else:
template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
h2o_logloss_train, template_logloss_train, False)
# print and compare the logloss between template and H2O for training data
failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,
allowed_diff, compare_att_str[compare_index],
h2o_att_str[compare_index], template_att_str[compare_index],
att_str_fail[compare_index], att_str_success[compare_index],
template_better, just_print[compare_index])
compare_index += 1
template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
h2o_logloss_test, template_logloss_test, False)
# print and compare the logloss between template and H2O for test data
failed_test_number += compare_two_arrays([h2o_logloss_test], [template_logloss_test], ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], template_better, just_print[compare_index])
compare_index += 1
# print the confusion matrix from training data
failed_test_number += compare_two_arrays(h2o_confusion_matrix_train, template_confusion_matrix_train, ignored_eps,
allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], True, just_print[compare_index])
compare_index += 1
# print the confusion matrix from test data
failed_test_number += compare_two_arrays(h2o_confusion_matrix_test, template_confusion_matrix_test, ignored_eps,
allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], True, just_print[compare_index])
compare_index += 1
template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
h2o_accuracy_train, template_accuracy_train, True)
# print accuracy from training dataset
failed_test_number += compare_two_arrays([h2o_accuracy_train], [template_accuracy_train], ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], template_better, just_print[compare_index])
compare_index += 1
# print accuracy from test dataset
template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
h2o_accuracy_test, template_accuracy_test, True)
failed_test_number += compare_two_arrays([h2o_accuracy_test], [template_accuracy_test], ignored_eps, allowed_diff,
compare_att_str[compare_index], h2o_att_str[compare_index],
template_att_str[compare_index], att_str_fail[compare_index],
att_str_success[compare_index], template_better, just_print[compare_index])
# print end test banner
print(end_test_str)
print("*******************************************************************************************")
sys.stdout.flush()
return failed_test_number
def is_template_better(just_print, can_be_better_than_template, h2o_att, template_att, bigger_is_better):
"""
This function is written to determine whether the template model performance is at least as good as
that of the system under test.
:param just_print: bool representing if we are just interested in printing the attribute values
:param can_be_better_than_template: bool stating that it is okay in this case for the system under test to perform
better than the template system.
:param h2o_att: number representing the h2o attribute under test
:param template_att: number representing the template attribute
:param bigger_is_better: bool representing if metric is perceived to be better if its value is higher
:return: bool indicating if the template attribute is better.
"""
if just_print: # not interested in comparison, just want to print attribute values
return True # does not matter what we return here
else:
if bigger_is_better: # metric is better if it is greater
return not(h2o_att > template_att)
else: # metric is better if it is less
return not(h2o_att < template_att)
def grab_model_params_metrics(model_h2o, h2o_model_test_metrics, family_type):
"""
This function will extract and return the various metrics from a H2O GLM model and the corresponding H2O model
test metrics.
:param model_h2o: GLM H2O model
:param h2o_model_test_metrics: performance on test data set from H2O GLM model
:param family_type: string representing 'gaussian', 'binomial' or 'multinomial'
:return: tuple containing weight, logloss/confusion matrix/prediction accuracy calculated from training data set
and test data set respectively
"""
# grab weight from h2o model
h2o_weight = get_train_glm_params(model_h2o, 'weights', family_type=family_type)
# grab other performance metrics
h2o_logloss_train = model_h2o.logloss()
h2o_confusion_matrix_train = get_train_glm_params(model_h2o, 'confusion_matrix', family_type=family_type)
last_index = len(h2o_confusion_matrix_train.cell_values)-1
h2o_logloss_test = h2o_model_test_metrics.logloss()
if 'multinomial' in family_type.lower():
h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix()
h2o_accuracy_train = 1-h2o_confusion_matrix_train.cell_values[last_index][last_index]
h2o_accuracy_test = 1-h2o_confusion_matrix_test.cell_values[last_index][last_index]
elif 'binomial' in family_type.lower():
h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix().table
real_last_index = last_index+1
h2o_accuracy_train = 1-float(h2o_confusion_matrix_train.cell_values[last_index][real_last_index])
h2o_accuracy_test = 1-float(h2o_confusion_matrix_test.cell_values[last_index][real_last_index])
else:
print("Only 'multinomial' and 'binomial' distribution families are supported for grab_model_params_metrics "
"function!")
sys.exit(1)
return h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,\
h2o_confusion_matrix_test, h2o_accuracy_test
def prepare_data_sklearn_multinomial(training_data_xy):
"""
The sklearn model requires that the input matrix contain a column of ones in order for
it to generate the intercept term. In addition, it wants the response vector to be in a
certain format as well.
:param training_data_xy: matrix containing both the predictors and response column
:return: tuple containing the predictor columns with a column of ones as the first column and
the response vector in the format that Sklearn wants.
"""
(num_row, num_col) = training_data_xy.shape
# change response to be enum and not real
y_ind = num_col-1
training_data_xy[:, y_ind] = training_data_xy[:, y_ind].astype(int)    # truncate the response column, not a row
# prepare response column for sklearn logistic regression
response_y = training_data_xy[:, y_ind]
response_y = np.ravel(response_y)
training_data = training_data_xy[:, range(0, y_ind)]
# added column of ones into data matrix X_MAT
temp_ones = np.asmatrix(np.ones(num_row)).transpose()
x_mat = np.concatenate((temp_ones, training_data), axis=1)
return response_y, x_mat
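# A minimal sketch (illustrative only, not part of the original utilities): split a combined
# [predictors | response] matrix into the (response vector, x_mat with a leading column of ones)
# layout that the sklearn comparison code expects.
def _example_prepare_data_sklearn_multinomial():
    training_data_xy = np.asmatrix([[0.5, 1.5, 2.0],
                                    [1.0, 2.5, 0.0],
                                    [2.0, 0.5, 1.0]])   # last column holds the class labels
    response_y, x_mat = prepare_data_sklearn_multinomial(training_data_xy)
    # response_y -> array([2., 0., 1.]); x_mat -> 3 x 3 with a first column of all ones
    return response_y, x_mat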
def get_gridables(params_in_json):
"""
This function is written to walk through all parameters of a model and grab the names, types and
default values of all the gridable parameters as three lists.
:param params_in_json: a list of parameters associated with a H2O model. Each list is a dict containing fields
of interest like name, type, gridable, default values, ....
:return: three lists: gridable_params, gridable_types and gridable_defaults containing the names of the parameter,
its associated type like int, float, unicode, bool and default parameter values
"""
# grab all gridable parameters and their types
gridable_parameters = []
gridable_types = []
gridable_defaults = []
for each_param in params_in_json:
if each_param['gridable']:
gridable_parameters.append(str(each_param["name"]))
gridable_types.append(each_param["type"])
if type(each_param["default_value"]).__name__ == 'unicode':  # hyper-parameters cannot be unicode
gridable_defaults.append(str(each_param["default_value"]))
else:
gridable_defaults.append(each_param["default_value"])
return gridable_parameters, gridable_types, gridable_defaults
def add_fold_weights_offset_columns(h2o_frame, nfold_max_weight_offset, column_names, column_type='fold_assignment'):
"""
Add fold_columns to the H2O training frame specified in h2o_frame according to nfold. The newly added
columns should use the names in column_names. Returns a h2o_frame with newly added fold_columns.
Copied from Eric's code.
:param h2o_frame: H2O frame containing training data
:param nfold_max_weight_offset: integer, number of fold in the cross-validation or maximum weight scale or offset
:param column_names: list of strings denoting the column names for the new fold columns
:param column_type: optional string denoting whether we are trying to generate fold_assignment or
weights_column or offset_column
:return: H2O frame with added fold column assignments
"""
number_row = h2o_frame.nrow
# copied this part from Eric's code
for index in range(len(column_names)):
if 'fold_assignment' in column_type:
temp_a = np.random.random_integers(0, nfold_max_weight_offset - 1, [number_row, 1]) # inclusive
elif 'weights_column' in column_type:
temp_a = np.random.uniform(0, nfold_max_weight_offset, [number_row, 1])
elif 'offset_column' in column_type:
temp_a = random.uniform(0, nfold_max_weight_offset)*np.asmatrix(np.ones(number_row)).transpose()
else:
print("column_type must be either 'fold_assignment' or 'weights_column'!")
sys.exit(1)
fold_assignments = h2o.H2OFrame(temp_a)
fold_assignments.set_names([column_names[index]])
h2o_frame = h2o_frame.cbind(fold_assignments)
return h2o_frame
def gen_grid_search(model_params, hyper_params, exclude_parameters, gridable_parameters, gridable_types,
gridable_defaults, max_int_number, max_int_val, min_int_val, max_real_number, max_real_val,
min_real_val, quantize_level='1.00000000'):
"""
This function is written to randomly generate griddable parameters for a gridsearch. For parameters already
found in hyper_params, no random list will be generated. In addition, we will check to make sure that the
griddable parameters are actually used by the model before adding them to the hyper_params dict.
:param model_params: list of string containing names of argument to the model
:param hyper_params: dict structure containing a list of gridable parameters names with their list
:param exclude_parameters: list containing parameter names not to be added to hyper_params
:param gridable_parameters: list of gridable parameter names
:param gridable_types: list of gridable parameter types
:param gridable_defaults: list of gridable parameter default values
:param max_int_number: integer, size of integer gridable parameter list
:param max_int_val: integer, maximum integer value for integer gridable parameter
:param min_int_val: integer, minimum integer value for integer gridable parameter
:param max_real_number: integer, size of real gridable parameter list
:param max_real_val: float, maximum real value for real gridable parameter
:param min_real_val: float, minimum real value for real gridable parameter
:param quantize_level: string representing the quantization level of floating point values generated randomly.
:return: a tuple of hyper_params: dict of hyper parameters for gridsearch, true_gridable_parameters:
a list of strings containing names of truly gridable parameters, true_gridable_types: a list of strings
denoting parameter types and true_gridable_defaults: default values of those truly gridable parameters
"""
count_index = 0
true_gridable_parameters = []
true_gridable_types = []
true_gridable_defaults = []
for para_name in gridable_parameters:
# parameter must not in exclusion list
if (para_name in model_params) and (para_name not in exclude_parameters):
true_gridable_parameters.append(para_name)
true_gridable_types.append(gridable_types[count_index])
true_gridable_defaults.append(gridable_defaults[count_index])
if para_name not in hyper_params.keys(): # add default value to user defined parameter list
# gridable parameter not seen before. Randomly generate values for it
if ('int' in gridable_types[count_index]) or ('long' in gridable_types[count_index]):
# make sure integer values are not duplicated, using set action to remove duplicates
hyper_params[para_name] = list(set([random.randint(min_int_val, max_int_val) for p in
range(0, max_int_number)]))
elif ('double' in gridable_types[count_index]) or ('float' in gridable_types[count_index]):
hyper_params[para_name] = fix_float_precision(list(np.random.uniform(min_real_val, max_real_val,
max_real_number)), quantize_level=quantize_level)
count_index += 1
return hyper_params, true_gridable_parameters, true_gridable_types, true_gridable_defaults
def fix_float_precision(float_list, quantize_level='1.00000000'):
"""
This function takes in a tuple/list of floating point numbers and attempts to quantize each element to a
fixed precision.
:param float_list: tuple/list of floating point numbers
:param quantize_level: string, optional, represent the number of fix points we care
:return: tuple of floats to the exact precision specified in quantize_level
"""
fixed_float = []
for num in float_list:
fixed_float.append(float(Decimal(num).quantize(Decimal(quantize_level))))
return list(set(fixed_float))
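# A minimal sketch (illustrative only, not part of the original utilities): quantize floats to two
# decimal places; duplicates created by the quantization are removed, so the order of the result
# is not guaranteed.
def _example_fix_float_precision():
    values = [0.123456, 0.124999, 0.98765]
    return fix_float_precision(values, quantize_level='1.00')   # e.g. [0.12, 0.99] in some order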
def extract_used_params_xval(a_grid_model, model_param_names, params_dict, algo="GBM"):
"""
This function performs a similar function to extract_used_params. However, for max_runtime_secs,
we need to go into each cross-validation model and grab the max_runtime_secs and add them up in order to
get the correct value. In addition, we put the algo-specific model parameters into params_dict.
:param a_grid_model: list of models generated by gridsearch
:param model_param_names: hyper-parameter names that are specified for the gridsearch.
:param params_dict: dict containing name/value pairs specified to an algo.
:param algo: string, optional, denoting the algo we are looking at.
:return: params_used: a dict structure containing parameters that take on values as name/value pairs which
will be used to build a model by hand using the same parameter setting as the model built by gridsearch.
"""
params_used = dict()
# need to extract the max_runtime_secs ONE cross-validation model or the base model
if a_grid_model._is_xvalidated:
xv_keys = a_grid_model._xval_keys
for id in xv_keys: # only need to get info from one model
each_xv_model = h2o.get_model(id) # get each model
params_used = extract_used_params(model_param_names, each_xv_model.params, params_dict, algo)
break
else:
params_used = extract_used_params(model_param_names, a_grid_model.params, params_dict, algo)
return params_used
def extract_used_params(model_param_names, grid_model_params, params_dict, algo="GLM"):
"""
This function is used to build a dict out of parameters used by our gridsearch to build a H2O model given
the dict structure that describes the parameters and their values used by gridsearch to build that
particular model.
:param model_param_names: list contains parameter names that we are interested in extracting
:param grid_model_params: dict contains key as names of parameter and values as list of two values: default and
actual.
:param params_dict: dict containing extra parameters to add to params_used like family, e.g. 'gaussian',
'binomial', ...
:return: params_used: a dict structure containing parameters that take on values as name/value pairs which
will be used to build a model by hand using the same parameter setting as the model built by gridsearch.
"""
params_used = dict()
grid_model_params_keys = grid_model_params.keys()
for each_parameter in model_param_names:
parameter_name = str(each_parameter)
if parameter_name in grid_model_params_keys:
params_used[parameter_name] = grid_model_params[each_parameter]['actual']
if params_dict:
for key, value in params_dict.items():
params_used[key] = value # add distribution family to parameters used list
# only for GLM, change lambda to Lambda
if algo =="GLM":
if 'lambda' in params_used.keys():
params_used['Lambda'] = params_used['lambda']
del params_used['lambda']
return params_used
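# Illustrative sketch (assumption, not from the original file): given grid model parameters of the form
#
#     grid_model_params = {"ntrees": {"default": 50, "actual": 20},
#                          "max_depth": {"default": 5, "actual": 3}}
#
# a call like extract_used_params(["ntrees", "max_depth"], grid_model_params, {"distribution": "gaussian"},
# algo="GBM") would return {"ntrees": 20, "max_depth": 3, "distribution": "gaussian"}; only for algo="GLM"
# would a 'lambda' entry be renamed to 'Lambda'.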
def insert_error_grid_search(hyper_params, gridable_parameters, gridable_types, error_number):
"""
This function will randomly introduce errors into a copy of hyper_params. Depending on the random number
error_number generated, the following errors can be introduced:
    error_number = 0: randomly alter the name of a hyper-parameter;
error_number = 1: randomly choose a hyper-parameter and remove all elements in its list
error_number = 2: add randomly generated new hyper-parameter names with random list
error_number other: randomly choose a hyper-parameter and insert an illegal type into it
:param hyper_params: dict containing all legal hyper-parameters for our grid search
:param gridable_parameters: name of griddable parameters (some may not be griddable)
:param gridable_types: type of griddable parameters
:param error_number: integer representing which errors to introduce into the gridsearch hyper-parameters
:return: new dict with errors in either parameter names or parameter values
"""
error_hyper_params = copy.deepcopy(hyper_params)
# error_hyper_params = {k : v for k, v in hyper_params.items()}
param_index = random.randint(0, len(hyper_params)-1)
param_name = list(hyper_params)[param_index]
param_type = gridable_types[gridable_parameters.index(param_name)]
if error_number == 0: # grab a hyper-param randomly and copy its name twice
new_name = param_name+param_name
error_hyper_params[new_name] = error_hyper_params[param_name]
del error_hyper_params[param_name]
elif error_number == 1:
error_hyper_params[param_name] = []
elif error_number == 2:
new_param = generate_random_words(random.randint(20,100))
error_hyper_params[new_param] = error_hyper_params[param_name]
else:
error_hyper_params = insert_bad_value(error_hyper_params, param_name, param_type)
return error_hyper_params
def insert_bad_value(error_hyper_params, param_name, param_type):
"""
    This function is written to insert into a hyper-parameter's value list a value whose type differs from
    that of the other elements in the list.
:param error_hyper_params: dict containing all hyper-parameters for a grid search
:param param_name: string denoting the hyper-parameter we want to insert bad element to
:param param_type: string denoting hyper-parameter type
:return: dict containing new inserted error value
"""
if 'int' in param_type: # insert a real number into integer
error_hyper_params[param_name].append(random.uniform(-10,10))
    elif 'enum' in param_type: # insert a float into enums
error_hyper_params[param_name].append(random.uniform(-10,10))
    elif 'double' in param_type: # insert a boolean into floats
error_hyper_params[param_name].append(random.uniform(0,1) > 0.5)
else: # insert a random string for all other cases
error_hyper_params[param_name].append(generate_random_words(random.randint(20,100)))
return error_hyper_params
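# Illustrative sketch (hypothetical values, not part of the original utilities): for an integer
# hyper-parameter, a call such as
#
#     insert_bad_value({'ntrees': [5, 10, 20]}, 'ntrees', 'int')
#
# appends a random real number (e.g. 3.7) to the value list, producing an illegal specification
# that the grid search is expected to reject.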
def generate_random_words(word_length):
"""
This function will generate a random word consisting of letters, numbers and
punctuation given the word_length.
:param word_length: integer denoting length of the word
:return: string representing the random word
"""
if word_length > 0:
all_chars = string.ascii_letters + string.digits + string.punctuation
return ''.join((random.choice(all_chars)) for index in range(int(word_length)))
else:
print("word_length must be an integer greater than 0.")
sys.exit(1)
def generate_redundant_parameters(hyper_params, gridable_parameters, gridable_defaults, error_number):
"""
This function will randomly choose a set of hyper_params and make a dict out of it so we can
duplicate the parameter specification in both the model and grid search.
:param hyper_params: dict containing all griddable parameters as hyper_param to grid search
    :param gridable_parameters: list of gridable parameters (some may not be truly gridable)
:param gridable_defaults: list of default values for gridable parameters
:param error_number: int, indicate ways to change the model parameter and the hyper-parameter
Here are the actions performed on the model parameter and hyper-parameters.
error_number = 0: set model parameter to be a value out of the hyper-parameter value list, should not
generate error;
error_number = 1: set model parameter to be default value, should not generate error in this case;
error_number = 3: make sure model parameter is not set to default and choose a value not in the
hyper-parameter value list.
:return: 2 dicts containing duplicated parameters with specification, new hyperparameter specification
"""
error_hyper_params = copy.deepcopy(hyper_params)
# error_hyper_params = {k : v for k, v in hyper_params.items()}
params_dict = {}
num_params = random.randint(1, len(error_hyper_params))
params_list = list(error_hyper_params)
# remove default values out of hyper_params
for key in params_list:
        default_value = gridable_defaults[gridable_parameters.index(key)]
if default_value in error_hyper_params[key]:
error_hyper_params[key].remove(default_value)
for index in range(num_params):
param_name = params_list[index]
hyper_params_len = len(error_hyper_params[param_name])
if error_number == 0:
# randomly assigned the parameter to take one value out of the list
param_value_index = random.randint(0, len(error_hyper_params[param_name])-1)
params_dict[param_name] = error_hyper_params[param_name][param_value_index]
elif error_number == 1:
param_value_index = gridable_parameters.index(param_name)
params_dict[param_name] = gridable_defaults[param_value_index]
else:
# randomly assign model parameter to one of the hyper-parameter values, should create error condition here
param_value_index = random.randint(0, hyper_params_len-1)
params_dict[param_name] = error_hyper_params[param_name][param_value_index]
# final check to make sure lambda is Lambda
if 'lambda' in list(params_dict):
params_dict["Lambda"] = params_dict['lambda']
del params_dict["lambda"]
return params_dict, error_hyper_params
def count_models(hyper_params):
"""
    Given a hyper_params dict, this function will return the maximum number of models that can be built out of all
    the combinations of hyper-parameters.
:param hyper_params: dict containing parameter name and a list of values to iterate over
:return: max_model_number: int representing maximum number of models built
"""
max_model_number = 1
for key in list(hyper_params):
max_model_number *= len(hyper_params[key])
return max_model_number
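# Worked example (hypothetical values, not from the original file): for a hyper-parameter space such as
#
#     hyper_params = {'ntrees': [10, 50, 100], 'max_depth': [3, 5]}
#
# count_models(hyper_params) returns 3 * 2 = 6, the size of the full Cartesian grid.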
def error_diff_2_models(grid_table1, grid_table2, metric_name):
"""
    This function will take two grid tables generated by gridsearch and calculate the mean absolute difference of
    the metric values specified by metric_name across the models in the two tables. It will return the mean difference.
:param grid_table1: first H2OTwoDimTable generated by gridsearch
:param grid_table2: second H2OTwoDimTable generated by gridsearch
:param metric_name: string, name of the metric of interest
:return: real number which is the mean absolute metric difference between the two models
"""
num_model = len(grid_table1.cell_values)
metric_diff = 0
for model_index in range(num_model):
metric_diff += abs(grid_table1.cell_values[model_index][-1] - grid_table2.cell_values[model_index][-1])
if (num_model > 0):
return metric_diff/num_model
else:
print("error_diff_2_models: your table contains zero models.")
sys.exit(1)
def find_grid_runtime(model_list):
"""
    Given a list of models built by gridsearch, this function will go into each model and calculate the total
    amount of time it took to actually build all the models, in seconds.
    :param model_list: list of models built by gridsearch, cartesian or randomized, with cross-validation
    enabled.
    :return: total_time_sec: total time in seconds spent building all the models
"""
total_time_sec = 0
for each_model in model_list:
total_time_sec += each_model._model_json["output"]["run_time"] # time in ms
# if cross validation is used, need to add those run time in here too
if each_model._is_xvalidated:
xv_keys = each_model._xval_keys
for id in xv_keys:
each_xv_model = h2o.get_model(id)
total_time_sec += each_xv_model._model_json["output"]["run_time"]
return total_time_sec/1000.0 # return total run time in seconds
def evaluate_metrics_stopping(model_list, metric_name, bigger_is_better, search_criteria, possible_model_number):
"""
    Given a list of models that contain the value of metric_name, this function will manually go through the
    early stopping condition and see if the randomized grid search gave us the correct number of models
    generated. Note that you cannot assume the model_list is in the order in which the models were built; it
    actually arrives already sorted, which is not what we want here.
:param model_list: list of models built sequentially that contains metric of interest among other fields
:param metric_name: string representing name of metric that we want to based our stopping condition on
:param bigger_is_better: bool indicating if the metric is optimized by getting bigger if True and vice versa
:param search_criteria: dict structure containing the search criteria for randomized gridsearch
    :param possible_model_number: integer, representing the maximum possible number of models built based on the
    hyper-parameter space size
    :return: bool indicating if the early stopping condition is justified
"""
tolerance = search_criteria["stopping_tolerance"]
stop_round = search_criteria["stopping_rounds"]
min_list_len = 2*stop_round # minimum length of metrics needed before we start early stopping evaluation
metric_list = [] # store metric of optimization
stop_now = False
# provide metric list sorted by time. Oldest model appear first.
metric_list_time_ordered = sort_model_by_time(model_list, metric_name)
for metric_value in metric_list_time_ordered:
metric_list.append(metric_value)
if len(metric_list) > min_list_len: # start early stopping evaluation now
stop_now = evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better)
if stop_now:
if len(metric_list) < len(model_list): # could have stopped early in randomized gridsearch
return False
else: # randomized gridsearch stopped at the correct condition
return True
    if len(metric_list) == possible_model_number: # early stopping condition never met by end of random gridsearch
        return True     # if the max number of models was built, still ok
else:
return False # early stopping condition never met but random gridsearch did not build all models, bad!
def sort_model_by_time(model_list, metric_name):
"""
    This function is written to sort the metrics that we care about in the order in which the models were built.
    The oldest model's metric will be the first element.
:param model_list: list of models built sequentially that contains metric of interest among other fields
:param metric_name: string representing name of metric that we want to based our stopping condition on
:return: model_metric_list sorted by time
"""
model_num = len(model_list)
model_metric_list = [None] * model_num
for index in range(model_num):
model_index = int(model_list[index]._id.split('_')[-1])
model_metric_list[model_index] = \
model_list[index]._model_json["output"]["cross_validation_metrics"]._metric_json[metric_name]
return model_metric_list
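# Note on the ordering above (derived from the code, not an official H2O contract): the build order is
# recovered from the numeric suffix of each model id, e.g. a model whose id ends in '..._3' has its metric
# placed in slot 3 of model_metric_list, so the returned list is ordered by build time rather than by
# metric value.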
def evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better):
"""
    This function mimics the early stopping function as implemented in ScoreKeeper.java. Please see the comments
    in that Java file for an explanation of how the early stopping works.
:param metric_list: list containing the optimization metric under consideration for gridsearch model
:param stop_round: integer, determine averaging length
:param tolerance: real, tolerance to see if the grid search model has improved enough to keep going
:param bigger_is_better: bool: True if metric is optimized as it gets bigger and vice versa
    :return: bool indicating if we should stop early; note that metric_list is sorted in place
"""
metric_len = len(metric_list)
metric_list.sort(reverse=bigger_is_better)
shortest_len = 2*stop_round
bestInLastK = 1.0*sum(metric_list[0:stop_round])/stop_round
lastBeforeK = 1.0*sum(metric_list[stop_round:shortest_len])/stop_round
if not(np.sign(bestInLastK) == np.sign(lastBeforeK)):
return False
ratio = bestInLastK/lastBeforeK
if math.isnan(ratio):
return False
if bigger_is_better:
return not (ratio > 1+tolerance)
else:
return not (ratio < 1-tolerance)
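# Worked example (hypothetical numbers, not from the original file): with stop_round=2, tolerance=0.01 and
# bigger_is_better=True, a metric list that sorts to
#
#     metric_list = [0.90, 0.89, 0.88, 0.88]
#
# gives bestInLastK = (0.90 + 0.89)/2 = 0.895 and lastBeforeK = (0.88 + 0.88)/2 = 0.88, so
# ratio = 0.895/0.88 ~ 1.017 > 1 + tolerance and the function returns False (keep searching); had the
# ratio fallen below 1.01, the improvement would be deemed too small and it would return True (stop early).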
def check_and_count_models(hyper_params, params_zero_one, params_more_than_zero, params_more_than_one,
params_zero_positive, max_grid_model):
"""
This function will look at the hyper-parameter space set in hyper_params, generate a new hyper_param space that
will contain a smaller number of grid_models. It will determine how many models will be built from
this new hyper_param space. In order to arrive at the correct answer, it must discount parameter settings that
are illegal.
:param hyper_params: dict containing model parameter names and list of values to set it to
:param params_zero_one: list containing model parameter names whose values must be between 0 and 1
:param params_more_than_zero: list containing model parameter names whose values must exceed zero
:param params_more_than_one: list containing model parameter names whose values must exceed one
:param params_zero_positive: list containing model parameter names whose values must equal to or exceed zero
:param max_grid_model: maximum number of grid_model that can be generated from the new hyper_params space
:return: total model: integer denoting number of grid models that can be built from all legal parameter settings
in new hyper_parameter space
final_hyper_params: dict of new hyper parameter space derived from the original hyper_params
"""
total_model = 1
param_len = 0
hyper_keys = list(hyper_params)
shuffle(hyper_keys) # get all hyper_parameter names in random order
final_hyper_params = dict()
for param in hyper_keys:
# this param should be between 0 and 2
if param == "col_sample_rate_change_per_level":
param_len = len([x for x in hyper_params["col_sample_rate_change_per_level"] if (x >= 0)
and (x <= 2)])
elif param in params_zero_one:
param_len = len([x for x in hyper_params[param] if (x >= 0)
and (x <= 1)])
elif param in params_more_than_zero:
param_len = len([x for x in hyper_params[param] if (x > 0)])
elif param in params_more_than_one:
param_len = len([x for x in hyper_params[param] if (x > 1)])
elif param in params_zero_positive:
param_len = len([x for x in hyper_params[param] if (x >= 0)])
else:
param_len = len(hyper_params[param])
if (param_len >= 0) and ((total_model*param_len) <= max_grid_model):
total_model *= param_len
final_hyper_params[param] = hyper_params[param]
elif (total_model*param_len) > max_grid_model:
break
return total_model, final_hyper_params
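# Illustrative sketch (hypothetical values, not from the original file): with max_grid_model = 10 and
#
#     hyper_params = {'learn_rate': [0.01, 0.1, 1.5], 'ntrees': [5, 10, 20, 50]}
#
# where 'learn_rate' appears in params_zero_one, only the two legal values 0.01 and 0.1 are counted, so the
# function reports 2 * 4 = 8 models and copies both parameters into final_hyper_params; a further parameter
# that pushed the product past 10 would stop the accumulation instead.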
def write_hyper_parameters_json(dir1, dir2, json_filename, hyper_parameters):
"""
Write a json file of the hyper_parameters in directories dir1 and dir2 for debugging purposes.
:param dir1: String containing first directory where you want to write the json file to
:param dir2: String containing second directory where you want to write the json file to
:param json_filename: String containing json file name
:param hyper_parameters: dict containing hyper-parameters used
"""
# save hyper-parameter file in test directory
with open(os.path.join(dir1, json_filename), 'w') as test_file:
json.dump(hyper_parameters, test_file)
# save hyper-parameter file in sandbox
with open(os.path.join(dir2, json_filename), 'w') as test_file:
json.dump(hyper_parameters, test_file)
def compare_frames(frame1, frame2, numElements, tol_time=0, tol_numeric=0, strict=False, compare_NA=True):
"""
    This function will compare two H2O frames to make sure their dimensions and the values in all cells are the
    same. It will not compare the column names, though.
:param frame1: H2O frame to be compared
:param frame2: H2O frame to be compared
:param numElements: integer to denote number of rows to compare. Done to reduce compare time.
Set to 0 or negative number if you want to compare all elements.
:param tol_time: optional parameter to limit time value difference.
    :param tol_numeric: optional parameter to limit numeric value difference.
:param strict: optional parameter to enforce strict comparison or not. If True, column type must
match in order to pass the test.
:param compare_NA: optional parameter to compare NA or not. For csv file generated from orc file, the
NAs are represented as some other symbol but our CSV will not be able to parse it correctly as NA.
In this case, do not compare the number of NAs.
:return: boolean: True, the two frames are equal and False otherwise.
"""
# check frame dimensions
rows1, cols1 = frame1.dim
rows2, cols2 = frame2.dim
assert rows1 == rows2 and cols1 == cols2, "failed dim check! frame 1 rows:{0} frame 2 rows:{1} frame 1 cols:{2} " \
"frame2 cols:{3}".format(rows1, rows2, cols1, cols2)
na_frame1 = frame1.isna().sum()
na_frame2 = frame2.isna().sum()
if compare_NA: # check number of missing values
assert na_frame1 == na_frame2, "failed numbers of NA check! Frame 1 NA number: {0}, frame 2 " \
"NA number: {1}".format(na_frame1, na_frame2)
# check column types are the same before proceeding to check each row content.
for col_ind in range(cols1):
c1_key = frame1.columns[col_ind]
c2_key = frame2.columns[col_ind]
c2_type = frame2.types[c2_key]
c1_type = frame1.types[c1_key]
print("###### Comparing column: {0} and column type is {1}.".format(col_ind, c1_type))
if strict: # every column type must match
assert c1_type == c2_type, "failed column type check! frame1 col type: {0}, frame2 col type: " \
"{1}".format(c1_type, c2_type)
else:
if str(c2_type) == 'enum': # orc files do not have enum column type. We convert it here
frame1[col_ind].asfactor()
else:
assert c1_type == c2_type, "failed column type check! frame1 col type: {0}, frame2 col type: " \
"{1}".format(c1_type, c2_type)
# compare string
if (str(c1_type) == 'string') or (str(c1_type) == 'enum'):
compareOneStringColumn(frame1, frame2, col_ind, rows1, numElements)
else:
if str(c2_type) == 'time': # compare time columns
compareOneNumericColumn(frame1, frame2, col_ind, rows1, tol_time, numElements)
else:
compareOneNumericColumn(frame1, frame2, col_ind, rows1, tol_numeric, numElements)
return True
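# Illustrative usage (assumption, not part of the original utilities): after parsing the same dataset from
# two different file formats, a test could call
#
#     assert compare_frames(frame_csv, frame_orc, numElements=100, tol_numeric=1e-10, strict=False)
#
# to spot-check 100 random rows per column, tolerating tiny floating point differences while still failing
# on dimension, NA-count or incompatible column-type mismatches.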
def compareOneStringColumn(frame1, frame2, col_ind, rows, numElements):
"""
This function will compare two String columns of two H2O frames to make sure that they are the same.
:param frame1: H2O frame to be compared
:param frame2: H2O frame to be compared
:param col_ind: integer denoting column index to compare the two frames
:param rows: integer denoting number of rows in the column
:param numElements: integer to denote number of rows to compare. Done to reduce compare time
:return: None. Will throw exceptions if comparison failed.
"""
row_indices = list(range(rows))
if numElements > 0:
random.shuffle(row_indices)
else:
numElements = rows
for ele_ind in range(numElements):
row_ind = row_indices[ele_ind]
val1 = frame1[row_ind, col_ind]
val2 = frame2[row_ind, col_ind]
assert val1 == val2, "failed frame values check! frame1 value: {0}, frame2 value: {1} at row {2}, column " \
"{3}".format(val1, val2, row_ind, col_ind)
def compareOneNumericColumn(frame1, frame2, col_ind, rows, tolerance, numElements):
"""
This function compares two numeric columns of two H2O frames to make sure that they are close.
:param frame1: H2O frame to be compared
:param frame2: H2O frame to be compared
:param col_ind: integer denoting column index to compare the two frames
:param rows: integer denoting number of rows in the column
:param tolerance: double parameter to limit numerical value difference.
:param numElements: integer to denote number of rows to compare. Done to reduce compare time.
:return: None. Will throw exceptions if comparison failed.
"""
row_indices = []
if numElements > 0:
        row_indices = random.sample(range(rows), numElements)  # range works on both Python 2 and 3
else:
numElements = rows # Compare all elements
row_indices = list(range(rows))
for ele_ind in range(numElements):
row_ind = row_indices[ele_ind]
val1 = frame1[row_ind, col_ind]
val2 = frame2[row_ind, col_ind]
if not(math.isnan(val1)) and not(math.isnan(val2)): # both frames contain valid elements
diff = abs(val1-val2)
assert diff <= tolerance, "failed frame values check! frame1 value = {0}, frame2 value = {1}, " \
"at row {2}, column {3}. The difference is {4}.".format(val1, val2, row_ind,
col_ind, diff)
elif math.isnan(val1) and math.isnan(val2): # both frame contains missing values
continue
else: # something is wrong, one frame got a missing value while the other is fine.
assert 1 == 2, "failed frame values check! frame1 value {0}, frame2 value {1} at row {2}, " \
"column {3}".format(val1, val2, row_ind, col_ind)
import warnings
def expect_warnings(filewithpath, warn_phrase="warn", warn_string_of_interest="warn", number_of_times=1, in_hdfs=False):
"""
    This function will import a file and analyze the output printed while running the import. The goal here is
    to capture any warnings that we may expect out of running the import.
:param filewithpath: name of file to be parsed with path
:param warn_phrase: capture the warning header, sometimes it is warn or userwarn.
:param warn_string_of_interest: specific warning message string
    :param number_of_times: number of warning lines we are expecting.
    :param in_hdfs: bool, optional, True if the file should be imported directly from HDFS
    :return: True if the warning was found at least number_of_times times and False otherwise
"""
number_warngings = 0
buffer = StringIO() # redirect warning messages to string buffer for later analysis
sys.stderr = buffer
frame = None
if in_hdfs:
frame = h2o.import_file(filewithpath)
else:
frame = h2o.import_file(path=locate(filewithpath))
sys.stderr = sys.__stderr__ # redirect it back to stdout.
try: # for python 2.7
if len(buffer.buflist) > 0:
for index in range(len(buffer.buflist)):
print("*** captured warning message: {0}".format(buffer.buflist[index]))
if (warn_phrase in buffer.buflist[index]) and (warn_string_of_interest in buffer.buflist[index]):
number_warngings = number_warngings+1
except: # for python 3.
warns = buffer.getvalue()
print("*** captured warning message: {0}".format(warns))
if (warn_phrase in warns) and (warn_string_of_interest in warns):
number_warngings = number_warngings+1
print("Number of warnings found: {0} and number of times that warnings should appear {1}.".format(number_warngings,
number_of_times))
if number_warngings >= number_of_times:
return True
else:
return False
def compare_frame_summary(frame1_summary, frame2_summary, compareNames=False, compareTypes=False):
"""
    This method is written to compare the frame summaries of two frames.
    :param frame1_summary: column summaries of the first H2O frame
    :param frame2_summary: column summaries of the second H2O frame
    :param compareNames: bool, optional, compare column labels if True
    :param compareTypes: bool, optional, compare column types if True
    :return: None. Will throw assertion errors if the comparison fails.
"""
frame1_column_number = len(frame1_summary)
frame2_column_number = len(frame2_summary)
assert frame1_column_number == frame2_column_number, "failed column number check! Frame 1 column number: {0}," \
"frame 2 column number: {1}".format(frame1_column_number,
frame2_column_number)
for col_index in range(frame1_column_number): # check summary for each column
for key_val in list(frame1_summary[col_index]):
if not(compareNames) and (str(key_val) == 'label'):
continue
if not(compareTypes) and (str(key_val) == 'type'):
continue
if str(key_val) == 'precision': # skip comparing precision
continue
val1 = frame1_summary[col_index][key_val]
val2 = frame2_summary[col_index][key_val]
if isinstance(val1, list) or isinstance(val1, dict):
if isinstance(val1, dict):
assert val1 == val2, "failed column summary comparison for column {0} and summary " \
"type {1}, frame 1 value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val), val1, val2)
else:
if len(val1) > 0:
# find if elements are float
float_found = False
for ind in range(len(val1)):
if isinstance(val1[ind], float):
float_found = True
break
if float_found:
for ind in range(len(val1)):
                                if not(str(val1[ind]) == 'NaN'):
assert abs(val1[ind]-val2[ind]) < 1e-5, "failed column summary comparison for " \
"column {0} and summary type {1}, frame 1" \
" value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val),
val1[ind], val2[ind])
else:
assert val1 == val2, "failed column summary comparison for column {0} and summary" \
" type {1}, frame 1 value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val), val1, val2)
else:
if isinstance(val1, float):
assert abs(val1-val2) < 1e-5, "failed column summary comparison for column {0} and summary type " \
"{1}, frame 1 value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val), val1, val2)
else:
assert val1 == val2, "failed column summary comparison for column {0} and summary type " \
"{1}, frame 1 value is {2}, frame 2 value is " \
"{3}".format(col_index, str(key_val), val1, val2)
def cannaryHDFSTest(hdfs_name_node, file_name):
"""
    This function is written to detect if the hive-exec version is too old. It will return
    True if it is too old and False otherwise.
    :param hdfs_name_node: string containing the HDFS name node address
    :param file_name: string containing the path of the file to parse on HDFS
    :return: True if the hive-exec version is too old, False otherwise
"""
url_orc = "hdfs://{0}{1}".format(hdfs_name_node, file_name)
try:
tempFrame = h2o.import_file(url_orc)
h2o.remove(tempFrame)
print("Your hive-exec version is good. Parsing success for {0}.".format(url_orc))
return False
except Exception as e:
print("Error exception is {0}".format(str(e)))
if "NoSuchFieldError: vector" in str(e):
return True
else: # exception is caused by other reasons.
return False
| apache-2.0 |
CforED/Machine-Learning | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
jpautom/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 40 | 23697 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet notice that glmnet divides it
    # by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same input for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape (n,
    dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
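# Illustrative sketch (not part of scikit-learn; numbers are hypothetical): given the eigenvalue spectrum of
# centered data, e.g.
#
#     spectrum = np.array([5.0, 3.0, 0.1, 0.09, 0.08])
#
# _infer_dimension_(spectrum, n_samples=100, n_features=5) scores every candidate rank with
# _assess_dimension_ and returns the rank with the highest log-likelihood; this is the routine behind
# n_components='mle' in PCA.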
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
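    # Editor's note (illustration only): with the default whiten=False this is
    #     cov = components_.T @ diag(explained_variance_ - noise_variance_)
    #           @ components_ + noise_variance_ * eye(n_features)
    # It reproduces the biased sample covariance exactly when
    # n_components_ == n_features (noise_variance_ is then 0); e.g. for the
    # 2-feature X of the class docstring one would expect
    #     np.allclose(PCA(n_components=2).fit(X).get_covariance(),
    #                 np.cov(X.T, bias=True))
    # to hold, and only approximately for smaller n_components.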
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
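    # Editor's note (illustration only): the computation above is the
    # Woodbury / matrix-inversion-lemma form of
    #     inv(sigma2 * I + W.T @ D @ W)
    #       = I / sigma2 - W.T @ inv(inv(D) + W @ W.T / sigma2) @ W / sigma2**2
    # with W = components_ (n_components, n_features), D the diagonal matrix
    # of (explained_variance_ - noise_variance_) and sigma2 = noise_variance_.
    # Only an (n_components, n_components) matrix is ever inverted, which is
    # what makes this cheaper than inverting get_covariance() directly.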
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
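# Editor's note: an illustrative, hedged sketch (not part of scikit-learn) of
# how ``score`` can be used to compare candidate values of ``n_components``
# under the probabilistic PCA model; the data below are synthetic.
if __name__ == "__main__":  # pragma: no cover
    _rng = np.random.RandomState(0)
    _X_demo = _rng.randn(200, 10)
    _X_demo[:, :3] += 3 * _rng.randn(200, 3)  # inflate a few directions
    for _k in (1, 3, 5, 9):
        print("n_components=%d average log-likelihood=%.3f"
              % (_k, PCA(n_components=_k).fit(_X_demo).score(_X_demo)))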
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
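# Editor's note: an illustrative, hedged comparison (not part of the module)
# between the exact and the randomized solver on small synthetic data; the two
# explained-variance ratios should roughly agree here.
if __name__ == "__main__":  # pragma: no cover
    _rng = np.random.RandomState(42)
    _X_demo = _rng.randn(100, 20)
    print(PCA(n_components=3).fit(_X_demo).explained_variance_ratio_)
    print(RandomizedPCA(n_components=3,
                        random_state=0).fit(_X_demo).explained_variance_ratio_)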
| bsd-3-clause |
rgommers/statsmodels | statsmodels/tools/tests/test_tools.py | 1 | 18390 | """
Test functions for models.tools
"""
from statsmodels.compat.python import lrange, range
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
from statsmodels.datasets import longley
from statsmodels.tools import tools
from statsmodels.tools.tools import pinv_extended
from statsmodels.compat.numpy import np_matrix_rank
class TestTools(TestCase):
def test_add_constant_list(self):
x = lrange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_1d(self):
x = np.arange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1d(self):
x = np.ones(5)
x = tools.add_constant(x)
assert_equal(x, np.ones(5))
def test_add_constant_has_constant2d(self):
x = np.asarray([[1,1,1,1],[1,2,3,4.]])
y = tools.add_constant(x)
assert_equal(x,y)
def test_recipr(self):
X = np.array([[2,1],[-1,0]])
Y = tools.recipr(X)
assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))
def test_recipr0(self):
X = np.array([[2,1],[-4,0]])
Y = tools.recipr0(X)
assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))
def test_rank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
self.assertEquals(tools.rank(X), np_matrix_rank(X))
X[:,0] = X[:,1] + X[:,2]
self.assertEquals(tools.rank(X), np_matrix_rank(X))
def test_extendedpinv(self):
X = standard_normal((40, 10))
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_extendedpinv_singular(self):
X = standard_normal((40, 10))
X[:, 5] = X[:, 1] + X[:, 3]
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_fullrank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
X[:,0] = X[:,1] + X[:,2]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,9))
self.assertEquals(tools.rank(Y), 9)
X[:,5] = X[:,3] + X[:,4]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,8))
warnings.simplefilter("ignore")
self.assertEquals(tools.rank(Y), 8)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
isestimable = tools.isestimable
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
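# Editor's note: a small, hedged illustration of what the test above
# exercises. ``isestimable(C, X)`` asks whether the contrast C lies in the row
# space of the design matrix X; with two identical columns only their sum is
# estimable.
if __name__ == "__main__":  # pragma: no cover
    _X = np.column_stack([np.ones(6), np.ones(6)])
    print(tools.isestimable([1, 1], _X))  # expected True
    print(tools.isestimable([1, 0], _X))  # expected False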
class TestCategoricalNumerical(object):
#TODO: use assert_raises to check that bad inputs are taken care of
def __init__(self):
#import string
stringabc = 'abcdefghijklmnopqrstuvwxy'
self.des = np.random.randn(25,2)
self.instr = np.floor(np.arange(10,60, step=2)/10)
x=np.zeros((25,5))
x[:5,0]=1
x[5:10,1]=1
x[10:15,2]=1
x[15:20,3]=1
x[20:25,4]=1
self.dummy = x
structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
('instrument','f4'),('str_instr','a10')])
structdes['var1'] = self.des[:,0][:,None]
structdes['var2'] = self.des[:,1][:,None]
structdes['instrument'] = self.instr[:,None]
string_var = [stringabc[0:5], stringabc[5:10],
stringabc[10:15], stringabc[15:20],
stringabc[20:25]]
string_var *= 5
self.string_var = np.array(sorted(string_var))
structdes['str_instr'] = self.string_var[:,None]
self.structdes = structdes
self.recdes = structdes.view(np.recarray)
def test_array2d(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],10)
def test_array1d(self):
des = tools.categorical(self.instr)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],6)
def test_array2d_drop(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2, drop=True)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.instr, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='instrument')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='instrument')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
# def test_arraylike2d(self):
# des = tools.categorical(self.structdes.tolist(), col=2)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 9)
# def test_arraylike1d(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr)
# test_dum = dum[:,-5:]
# assert_array_equal(test_dum, self.dummy)
# assert_equal(dum.shape[1], 6)
# def test_arraylike2d_drop(self):
# des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
# test_des = des[:,-5:]
# assert_array_equal(test__des, self.dummy)
# assert_equal(des.shape[1], 8)
# def test_arraylike1d_drop(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr, drop=True)
# assert_array_equal(dum, self.dummy)
# assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
# comment out until we have type coercion
# def test_array2d(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],10)
# def test_array1d(self):
# des = tools.categorical(self.instr)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],6)
# def test_array2d_drop(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2, drop=True)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.string_var, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='str_instr')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='str_instr')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_arraylike2d(self):
pass
def test_arraylike1d(self):
pass
def test_arraylike2d_drop(self):
pass
def test_arraylike1d_drop(self):
pass
def test_rec_issue302():
arr = np.rec.fromrecords([[10], [11]], names='group')
actual = tools.categorical(arr)
expected = np.rec.array([(10, 1.0, 0.0), (11, 0.0, 1.0)],
dtype=[('group', int), ('group_10', float), ('group_11', float)])
assert_array_equal(actual, expected)
def test_issue302():
arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
actual = tools.categorical(arr, col=['group'])
expected = np.rec.array([(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
dtype=[('group', int), ('whatever', int), ('group_10', float),
('group_11', float)])
assert_array_equal(actual, expected)
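# Editor's note: the two regression tests above pin down the expected layout
# of ``tools.categorical``: it appends one indicator (dummy) column per level
# of the selected column while keeping the original columns in place, e.g. a
# 'group' column with levels 10 and 11 gains 'group_10' and 'group_11' columns.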
def test_pandas_const_series():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=False)
assert_string_equal('const', series.columns[1])
assert_equal(series.var(0)[1], 0)
def test_pandas_const_series_prepend():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=True)
assert_string_equal('const', series.columns[0])
assert_equal(series.var(0)[0], 0)
def test_pandas_const_df():
dta = longley.load_pandas().exog
dta = tools.add_constant(dta, prepend=False)
assert_string_equal('const', dta.columns[-1])
assert_equal(dta.var(0)[-1], 0)
def test_pandas_const_df_prepend():
dta = longley.load_pandas().exog
# regression test for #1025
dta['UNEMP'] /= dta['UNEMP'].std()
dta = tools.add_constant(dta, prepend=True)
assert_string_equal('const', dta.columns[0])
assert_equal(dta.var(0)[0], 0)
def test_chain_dot():
A = np.arange(1,13).reshape(3,4)
B = np.arange(3,15).reshape(4,3)
C = np.arange(5,8).reshape(3,1)
assert_equal(tools.chain_dot(A,B,C), np.array([[1820],[4300],[6780]]))
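# Editor's note (illustration only): ``chain_dot(A, B, C)`` is the
# left-to-right matrix product, i.e. np.dot(np.dot(A, B), C); the quick check
# below mirrors the test above.
if __name__ == "__main__":  # pragma: no cover
    _A = np.arange(1, 13).reshape(3, 4)
    _B = np.arange(3, 15).reshape(4, 3)
    _C = np.arange(5, 8).reshape(3, 1)
    print(np.array_equal(tools.chain_dot(_A, _B, _C),
                         np.dot(np.dot(_A, _B), _C)))  # expected True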
class TestNanDot(object):
@classmethod
def setupClass(cls):
nan = np.nan
cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
cls.mx_2 = np.array([[nan, nan], [2., 3.]])
cls.mx_3 = np.array([[0., 0.], [0., 0.]])
cls.mx_4 = np.array([[1., 0.], [1., 0.]])
cls.mx_5 = np.array([[0., 1.], [0., 1.]])
cls.mx_6 = np.array([[1., 2.], [3., 4.]])
def test_11(self):
test_res = tools.nan_dot(self.mx_1, self.mx_1)
expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
assert_array_equal(test_res, expected_res)
def test_12(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_2)
expected_res = np.array([[ nan, nan], [ nan, nan]])
assert_array_equal(test_res, expected_res)
def test_13(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_14(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_41(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_4, self.mx_1)
expected_res = np.array([[ nan, 1.], [ nan, 1.]])
assert_array_equal(test_res, expected_res)
def test_23(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_32(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_3, self.mx_2)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_24(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_25(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_5)
expected_res = np.array([[ 0., nan], [ 0., 5.]])
assert_array_equal(test_res, expected_res)
def test_66(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_6, self.mx_6)
expected_res = np.array([[ 7., 10.], [ 15., 22.]])
assert_array_equal(test_res, expected_res)
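# Editor's note: a hedged illustration of the NaN-propagation convention the
# tests above pin down for ``tools.nan_dot``: a NaN contributes NaN to an
# output cell only when it is multiplied by something non-zero, so NaN * 0 is
# treated as 0 rather than NaN.
if __name__ == "__main__":  # pragma: no cover
    _lhs = np.array([[np.nan, 1.], [2., 3.]])
    _rhs = np.array([[1., 0.], [1., 0.]])
    print(tools.nan_dot(_lhs, _rhs))  # expected [[nan, 0.], [5., 0.]]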
| bsd-3-clause |
dssg/wikienergy | disaggregator/build/pandas/pandas/tseries/tests/test_timedeltas.py | 1 | 55479 | # pylint: disable-msg=E1101,W0612
from __future__ import division
from datetime import datetime, timedelta, time
import nose
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, Timedelta, TimedeltaIndex, isnull, notnull,
bdate_range, date_range, timedelta_range, Int64Index)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long, PY3_2
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
assert_index_equal,
ensure_clean)
from pandas.tseries.offsets import Day, Second, Hour
import pandas.util.testing as tm
from numpy.random import rand, randn
from pandas import _np_version_under1p8
iNaT = tslib.iNaT
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10,'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10,unit='d').value, expected)
self.assertEqual(Timedelta(10.0,unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10,'s').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10,seconds=10).value, expected)
self.assertEqual(Timedelta(days=10,milliseconds=10*1000).value, expected)
self.assertEqual(Timedelta(days=10,microseconds=10*1000*1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days':'D', 'seconds':'s', 'microseconds':'us',
'milliseconds':'ms', 'minutes':'m', 'hours':'h', 'weeks':'W'}
npdtypes = [np.int64, np.int32, np.int16,
np.float64, np.float32, np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1, npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(**{pykwarg:npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# more strings
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'), Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'), Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'), timedelta(days=10,hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10,hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(days=10,hours=1,minutes=1,seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -timedelta(days=10,hours=1,minutes=1,seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -timedelta(days=10,hours=1,minutes=1,seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -timedelta(days=10,hours=1,minutes=1,seconds=1,microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -timedelta(days=10,hours=1,minutes=1,seconds=31,microseconds=3))
        # currently invalid as it has a - on the hhmmss part (only allowed on the days)
self.assertRaises(ValueError, lambda : Timedelta('-10 days -1 h 1.5m 1s 3us'))
# roundtripping both for string and value
for v in ['1s',
'-1s',
'1us',
'-1us',
'1 day',
'-1 day',
'-23:59:59.999999',
'-1 days +23:59:59.999999',
'-1ns',
'1ns',
'-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value),td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)),td)
self.assertEqual(Timedelta(td._repr_base(format='all')),td)
# floats
expected = np.timedelta64(10,'s').astype('m8[ns]').view('i8') + np.timedelta64(500,'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5,unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value,iNaT)
self.assertEqual(Timedelta('nat').value,iNaT)
self.assertEqual(Timedelta('NAT').value,iNaT)
self.assertTrue(isnull(Timestamp('nat')))
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),Timedelta('0 days, 00:00:02'))
# invalid
tm.assertRaisesRegexp(ValueError,
"cannot construct a TimeDelta",
lambda : Timedelta())
tm.assertRaisesRegexp(ValueError,
"cannot create timedelta string convert",
lambda : Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a TimeDelta from the passed arguments, allowed keywords are ",
lambda : Timedelta(day=10))
def test_repr(self):
self.assertEqual(repr(Timedelta(10,unit='d')),"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10,unit='s')),"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10,unit='ms')),"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10,unit='ms')),"Timedelta('-1 days +23:59:59.990000')")
def test_identity(self):
td = Timedelta(10,unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_conversion(self):
for td in [ Timedelta(10,unit='d'), Timedelta('1 days, 10:11:12.012345') ]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta)
and not isinstance(pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
        # this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_ops(self):
td = Timedelta(10,unit='d')
self.assertEqual(-td,Timedelta(-10,unit='d'))
self.assertEqual(+td,Timedelta(10,unit='d'))
self.assertEqual(td - td, Timedelta(0,unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20,unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20,unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5,unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is pd.NaT)
# invert
self.assertEqual(-td,Timedelta('-10d'))
self.assertEqual(td * -1,Timedelta('-10d'))
self.assertEqual(-1 * td,Timedelta('-10d'))
self.assertEqual(abs(-td),Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda : Timedelta(11,unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda : td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda : td + 2)
self.assertRaises(TypeError, lambda : td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1,'D')
self.assertEquals(result, td.value/float(86400*1e9))
result = td / np.timedelta64(1,'s')
self.assertEquals(result, td.value/float(1e9))
result = td / np.timedelta64(1,'ns')
self.assertEquals(result, td.value)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other, np.array([1]))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td, np.array([1]))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_compare_timedelta_series(self):
        # regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_fields(self):
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days,1)
self.assertEqual(rng.hours,10)
self.assertEqual(rng.minutes,11)
self.assertEqual(rng.seconds,12)
self.assertEqual(rng.milliseconds,0)
self.assertEqual(rng.microseconds,0)
self.assertEqual(rng.nanoseconds,0)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td),Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td,Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value,49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value,-49728000000000)
rng = to_timedelta('-1 days, 10:11:12')
self.assertEqual(rng.days,-1)
self.assertEqual(rng.hours,10)
self.assertEqual(rng.minutes,11)
self.assertEqual(rng.seconds,12)
self.assertEqual(rng.milliseconds,0)
self.assertEqual(rng.microseconds,0)
self.assertEqual(rng.nanoseconds,0)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days,-1)
self.assertEqual(tup.hours,23)
self.assertEqual(tup.minutes,59)
self.assertEqual(tup.seconds,59)
self.assertEqual(tup.milliseconds,999)
self.assertEqual(tup.microseconds,999)
self.assertEqual(tup.nanoseconds,0)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days,-2)
self.assertEqual(tup.hours,23)
self.assertEqual(tup.minutes,59)
self.assertEqual(tup.seconds,59)
self.assertEqual(tup.milliseconds,999)
self.assertEqual(tup.microseconds,999)
self.assertEqual(tup.nanoseconds,0)
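    # Editor's note (illustration only): as exercised above, a negative
    # Timedelta keeps its sign on the ``days`` component and normalises the
    # sub-day part to a positive offset, so Timedelta(-1, unit='us') is
    # expected to have components (days=-1, hours=23, minutes=59, seconds=59,
    # milliseconds=999, microseconds=999, nanoseconds=0) and to print as
    # '-1 days +23:59:59.999999'.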
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5),unit='D')
result = timedelta_range('0 days',periods=5,freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11),unit='D')
result = timedelta_range('0 days','10 days',freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5),unit='D') + Second(2) + Day()
result = timedelta_range('1 days, 00:00:02','5 days, 00:00:02',freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta([1,3,5,7,9],unit='D') + Second(2)
result = timedelta_range('1 days, 00:00:02',periods=5,freq='2D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50),unit='T')*30
result = timedelta_range('0 days',freq='30T',periods=50)
tm.assert_index_equal(result, expected)
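    # Editor's note (illustration only): timedelta_range mirrors date_range,
    # so timedelta_range('0 days', periods=3, freq='12H') is expected to yield
    # ['0 days 00:00:00', '0 days 12:00:00', '1 days 00:00:00'].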
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))
self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))
self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10,'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))
self.assertEqual(ct('100'), np.timedelta64(100,'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000,'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000,'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000,'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000,'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1,'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10,'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100,'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000,'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000,'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000,'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000,'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1,'s')))
self.assertEqual(ct('06:00:01'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms')))
self.assertEqual(ct('- 1days, 00:00:01'), conv(-d1+np.timedelta64(1,'s')))
self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
self.assertEqual(to_timedelta('nat',box=False).astype('int64'), tslib.iNaT)
self.assertEqual(to_timedelta('nan',box=False).astype('int64'), tslib.iNaT)
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003',box=False), conv(d1+np.timedelta64(6*3600+5*60+1,'s')+np.timedelta64(30,'us')))
self.assertEqual(to_timedelta('15.5us',box=False), conv(np.timedelta64(15500,'ns')))
# empty string
result = to_timedelta('',box=False)
self.assertEqual(result.astype('int64'), tslib.iNaT)
result = to_timedelta(['', ''])
self.assertTrue(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1,'s')]))
expected = np.array([np.timedelta64(1,'s')])
tm.assert_almost_equal(result,expected)
# ints
result = np.timedelta64(0,'ns')
expected = to_timedelta(0,box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d','1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ])
expected = to_timedelta([0,10],unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
# arrays of various dtypes
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='s')
expected = TimedeltaIndex([ np.timedelta64(1,'s') ]*5)
tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='m')
expected = TimedeltaIndex([ np.timedelta64(1,'m') ]*5)
tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='h')
expected = TimedeltaIndex([ np.timedelta64(1,'h') ]*5)
tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([ np.timedelta64(1,'s') ]*5)
tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([ np.timedelta64(1,'D') ]*5)
tm.assert_index_equal(result, expected)
# Test with lists as input when box=false
expected = np.array(np.arange(3)*1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array([0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5),unit=unit)
expected = TimedeltaIndex([ np.timedelta64(i,transform(unit)) for i in np.arange(5).tolist() ])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2,unit=unit)
expected = Timedelta(np.timedelta64(2,transform(unit)).astype('timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y','M','W','D','y','w','d']:
testit(unit,lambda x: x.upper())
for unit in ['days','day','Day','Days']:
testit(unit,lambda x: 'D')
for unit in ['h','m','s','ms','us','ns','H','S','MS','US','NS']:
testit(unit,lambda x: x.lower())
# offsets
# m
testit('T',lambda x: 'm')
# ms
testit('L',lambda x: 'ms')
# these will error
self.assertRaises(ValueError, lambda : to_timedelta([1,2],unit='foo'))
self.assertRaises(ValueError, lambda : to_timedelta(1,unit='foo'))
# time not supported ATM
self.assertRaises(ValueError, lambda :to_timedelta(time(second=1)))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1,'s')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600,'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:08')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew','kurt','sem','var','prod']:
self.assertRaises(TypeError, lambda : getattr(td,op)())
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10,unit='s'),
timedelta(seconds=10),
np.timedelta64(10,'s'),
np.timedelta64(10000000000,'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1,seconds=10),
np.timedelta64(1,'D')+np.timedelta64(10,'s'),
pd.offsets.Day()+pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'), timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
actual = pd.to_timedelta(pd.NaT)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = s1 + pd.NaT # NaT is datetime, not timedelta
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
a = pd.to_timedelta(list_of_strings)
b = Series(list_of_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v,v_p)
class TestTimedeltaIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range('1 days','10 days')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_pickle(self):
rng = timedelta_range('1 days', periods=10)
rng_p = self.round_trip_pickle(rng)
tm.assert_index_equal(rng,rng_p)
def test_hash_error(self):
index = timedelta_range('1 days', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_append_join_nondatetimeindex(self):
rng = timedelta_range('1 days', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
def test_append_numpy_bug_1681(self):
td = timedelta_range('1 days','10 days',freq='2D')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': td}, index=td)
str(c)
result = a.append(c)
self.assertTrue((result['B'] == td).all())
def test_astype(self):
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
self.assert_numpy_array_equal(rng.days, np.array([1,1],dtype='int64'))
self.assert_numpy_array_equal(rng.hours, np.array([10,10],dtype='int64'))
self.assert_numpy_array_equal(rng.minutes, np.array([11,11],dtype='int64'))
self.assert_numpy_array_equal(rng.seconds, np.array([12,13],dtype='int64'))
self.assert_numpy_array_equal(rng.milliseconds, np.array([0,0],dtype='int64'))
self.assert_numpy_array_equal(rng.microseconds, np.array([0,0],dtype='int64'))
self.assert_numpy_array_equal(rng.nanoseconds, np.array([0,0],dtype='int64'))
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days,Series([1,np.nan],index=[0,1]))
tm.assert_series_equal(s.dt.hours,Series([10,np.nan],index=[0,1]))
tm.assert_series_equal(s.dt.milliseconds,Series([0,np.nan],index=[0,1]))
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
self.assertFalse(result.iloc[0].isnull().all())
self.assertTrue(result.iloc[1].isnull().all())
def test_constructor(self):
expected = TimedeltaIndex(['1 days','1 days 00:00:05',
'2 days','2 days 00:00:02','0 days 00:00:03'])
result = TimedeltaIndex(['1 days','1 days, 00:00:05',
np.timedelta64(2,'D'),
timedelta(days=2,seconds=2),
pd.offsets.Second(3)])
tm.assert_index_equal(result,expected)
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02'])
tm.assert_index_equal(TimedeltaIndex(range(3), unit='s'), expected)
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:05', '0 days 00:00:09'])
tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit='s'), expected)
expected = TimedeltaIndex(['0 days 00:00:00.400', '0 days 00:00:00.450', '0 days 00:00:01.200'])
tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'), expected)
def test_constructor_coverage(self):
rng = timedelta_range('1 days', periods=10.5)
exp = timedelta_range('1 days', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
periods='foo', freq='D')
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
end='10 days')
self.assertRaises(ValueError, TimedeltaIndex, '1 days')
# generator expression
gen = (timedelta(i) for i in range(10))
result = TimedeltaIndex(gen)
expected = TimedeltaIndex([timedelta(i) for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['1 days', '2 days', '3 days'])
result = TimedeltaIndex(strings)
expected = to_timedelta([1,2,3],unit='d')
self.assertTrue(result.equals(expected))
from_ints = TimedeltaIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming freq
self.assertRaises(ValueError, TimedeltaIndex,
['1 days', '2 days', '4 days'],
freq='D')
self.assertRaises(ValueError, TimedeltaIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = TimedeltaIndex(start='1 days', periods=1, freq='D',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range('20130101',periods=4)) - \
Series(date_range('20121201',periods=4))
td[2] += timedelta(minutes=5,seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1,'D')
expected = Series([31,31,(31*86400+5*60+3)/86400.0,np.nan])
assert_series_equal(result,expected)
result = td.astype('timedelta64[D]')
expected = Series([31,31,31,np.nan])
assert_series_equal(result,expected)
result = td / np.timedelta64(1,'s')
expected = Series([31*86400,31*86400,31*86400+5*60+3,np.nan])
assert_series_equal(result,expected)
result = td.astype('timedelta64[s]')
assert_series_equal(result,expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1,'D')
expected = Index([31,31,(31*86400+5*60+3)/86400.0,np.nan])
assert_index_equal(result,expected)
result = td.astype('timedelta64[D]')
expected = Index([31,31,31,np.nan])
assert_index_equal(result,expected)
result = td / np.timedelta64(1,'s')
expected = Index([31*86400,31*86400,31*86400+5*60+3,np.nan])
assert_index_equal(result,expected)
result = td.astype('timedelta64[s]')
assert_index_equal(result,expected)
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True]+[False]*7)
self.assert_numpy_array_equal(result, exp)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
if PY3_2:
raise nose.SkipTest('nat comparisons on 3.2 broken')
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2,'D'),
np.timedelta64(2,'D'),
np.timedelta64('nat'), np.timedelta64('nat'),
np.timedelta64(1,'D') + np.timedelta64(2,'s'),
np.timedelta64(5,'D') + np.timedelta64(3,'s')])
if _np_version_under1p8:
# cannot test array because np.datetime64('nat') returns today's date
cases = [(tdidx1, tdidx2)]
else:
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
self.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = timedelta_range('1 day', periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
tm.assert_isinstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d','1d','2d'])
self.assertTrue(idx.equals(list(idx)))
non_td = Index(list('abc'))
self.assertFalse(idx.equals(list(non_td)))
def test_union(self):
i1 = timedelta_range('1day',periods=5)
i2 = timedelta_range('3day',periods=5)
result = i1.union(i2)
expected = timedelta_range('1day',periods=7)
self.assert_numpy_array_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(['3d','1d','2d'])
ordered = TimedeltaIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = timedelta_range('1 day', periods=4, freq='3H')
rng_b = timedelta_range('1 day', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = TimedeltaIndex(['1 day 15:19:49.695000'])
right = TimedeltaIndex(['2 day 13:04:21.322000',
'1 day 15:27:24.873000',
'1 day 15:31:05.350000'])
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
left = timedelta_range("1 day","30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
self.assertEqual(len(result), 0)
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00',periods=3,freq='h')
tm.assert_index_equal(result,expected)
def test_get_duplicates(self):
idx = TimedeltaIndex(['1 day','2 day','2 day','3 day','3day', '4day'])
result = idx.get_duplicates()
ex = TimedeltaIndex(['2 day','3day'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = TimedeltaIndex(['1 day 00:00:05','1 day 00:00:01','1 day 00:00:02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = TimedeltaIndex(['4d','1d','2d'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = TimedeltaIndex(['4day','1day','2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day','1day','5day','2day'],name='idx')
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
self.assertNotIsInstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day','1day 00:00:01','1day 00:00:02','1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01','1day 00:00:02','1day 00:00:03','1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01','1day 00:00:01','1day 00:00:02','1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01','1day 00:00:02','1day 00:00:03','1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
# preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D', name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D', name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(['1 day','3 day','4 day', '5 day'],freq=None,name='idx')
cases ={0: expected_0, -5: expected_0,
-1: expected_4, 4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
# preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D', name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D', name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d','2 d','3 d',
'7 d','8 d','9 d','10d'], freq=None, name='idx')
cases ={(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
result = idx.delete(slice(n[0], n[-1] + 1))
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_take(self):
tds = ['1day 02:00:00','1 day 04:00:00','1 day 10:00:00']
idx = TimedeltaIndex(start='1d',end='2d',freq='H',name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2,4,10]]
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
tm.assert_isinstance(taken, TimedeltaIndex)
self.assertIsNone(taken.freq)
self.assertEqual(taken.name, expected.name)
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='td')
str(df)
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
tm.assert_index_equal(cols, joined)
def test_slice_keeps_name(self):
# GH4226
dr = pd.timedelta_range('1d','5d', freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = timedelta_range('1 day', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def test_factorize(self):
idx1 = TimedeltaIndex(['1 day','1 day','2 day',
'2 day','3 day','3 day'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = TimedeltaIndex(['1 day','2 day','3 day'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = timedelta_range('1 day', periods=4, freq='s')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestSlicing(tm.TestCase):
def test_partial_slice(self):
rng = timedelta_range('1 day 10:11:12', freq='h',periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['5 day':'6 day']
expected = s.iloc[86:134]
assert_series_equal(result, expected)
result = s['5 day':]
expected = s.iloc[86:]
assert_series_equal(result, expected)
result = s[:'6 day']
expected = s.iloc[:134]
assert_series_equal(result, expected)
result = s['6 days, 23:11:12']
self.assertEqual(result, s.irow(133))
self.assertRaises(KeyError, s.__getitem__, '50 days')
def test_partial_slice_high_reso(self):
# higher reso
rng = timedelta_range('1 day 10:11:12', freq='us',periods=2000)
s = Series(np.arange(len(rng)), index=rng)
result = s['1 day 10:11:12':]
expected = s.iloc[0:]
assert_series_equal(result, expected)
result = s['1 day 10:11:12.001':]
expected = s.iloc[1000:]
assert_series_equal(result, expected)
result = s['1 days, 10:11:12.001001']
self.assertEqual(result, s.irow(1001))
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
timedelta_range('0', periods=20, freq='H'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Timedelta(hours=7)::-1], SLC[7::-1])
assert_slices_equivalent(SLC['7 hours'::-1], SLC[7::-1])
assert_slices_equivalent(SLC[:Timedelta(hours=7):-1], SLC[:6:-1])
assert_slices_equivalent(SLC[:'7 hours':-1], SLC[:6:-1])
assert_slices_equivalent(SLC['15 hours':'7 hours':-1], SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):Timedelta(hours=7):-1], SLC[15:6:-1])
assert_slices_equivalent(SLC['15 hours':Timedelta(hours=7):-1], SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):'7 hours':-1], SLC[15:6:-1])
assert_slices_equivalent(SLC['7 hours':'15 hours':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
timedelta_range('0', periods=20, freq='H'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
rafwiewiora/msmbuilder | msmbuilder/tests/test_vmhmm.py | 6 | 5110 | from __future__ import print_function, division
import random
from itertools import permutations
import numpy as np
from scipy.stats.distributions import vonmises
import pickle
import tempfile
from sklearn.pipeline import Pipeline
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.featurizer import DihedralFeaturizer
from msmbuilder.hmm import VonMisesHMM
def test_code_works():
# creates a 4-state HMM on the ALA2 data. Nothing fancy, just makes
# sure the code runs without erroring out
trajectories = AlanineDipeptide().get_cached().trajectories
topology = trajectories[0].topology
indices = topology.select('symbol C or symbol O or symbol N')
featurizer = DihedralFeaturizer(['phi', 'psi'], trajectories[0][0])
sequences = featurizer.transform(trajectories)
hmm = VonMisesHMM(n_states=4, n_init=1)
hmm.fit(sequences)
assert len(hmm.timescales_) == 3
assert np.any(hmm.timescales_ > 50)
def circwrap(x):
"""Wrap an array on (-pi, pi)"""
return x - 2 * np.pi * np.floor(x / (2 * np.pi) + 0.5)
def create_timeseries(means, kappas, transmat):
"""Construct a random timeseries based on a specified Markov model."""
numStates = len(means)
state = random.randint(0, numStates - 1)
cdf = np.cumsum(transmat, 1)
numFrames = 1000
X = np.empty((numFrames, 1))
for i in range(numFrames):
rand = random.random()
state = (cdf[state] > rand).argmax()
X[i, 0] = circwrap(vonmises.rvs(kappas[state], means[state]))
return X
def validate_timeseries(means, kappas, transmat, model, meantol,
kappatol, transmattol):
"""Test our model matches the one used to create the timeseries."""
numStates = len(means)
assert len(model.means_) == numStates
assert (model.transmat_ >= 0.0).all()
assert (model.transmat_ <= 1.0).all()
totalProbability = sum(model.transmat_.T)
assert (abs(totalProbability - 1.0) < 1e-5).all()
# The states may have come out in a different order,
# so we need to test all possible permutations.
for order in permutations(range(len(means))):
match = True
for i in range(numStates):
if abs(circwrap(means[i] - model.means_[order[i]])) > meantol:
match = False
break
if abs(kappas[i] - model.kappas_[order[i]]) > kappatol:
match = False
break
for j in range(numStates):
diff = transmat[i, j] - model.transmat_[order[i], order[j]]
if abs(diff) > transmattol:
match = False
break
if match:
# It matches.
return
# No permutation matched.
assert False
def test_2_state():
transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
means = np.array([[0.0], [2.0]])
kappas = np.array([[4.0], [8.0]])
X = [create_timeseries(means, kappas, transmat) for i in range(10)]
# For each value of various options,
# create a 2 state HMM and see if it is correct.
for reversible_type in ('mle', 'transpose'):
model = VonMisesHMM(n_states=2, reversible_type=reversible_type,
thresh=1e-4, n_iter=30)
model.fit(X)
validate_timeseries(means, kappas, transmat, model, 0.1, 0.5, 0.05)
assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
def test_3_state():
transmat = np.array([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.8, 0.2, 0.0]])
means = np.array([[0.0], [2.0], [4.0]])
kappas = np.array([[8.0], [8.0], [6.0]])
X = [create_timeseries(means, kappas, transmat) for i in range(20)]
# For each value of various options,
# create a 3 state HMM and see if it is correct.
for reversible_type in ('mle', 'transpose'):
model = VonMisesHMM(n_states=3, reversible_type=reversible_type,
thresh=1e-4, n_iter=30)
model.fit(X)
validate_timeseries(means, kappas, transmat, model, 0.1, 0.5, 0.1)
assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
def test_pipeline():
trajs = AlanineDipeptide().get_cached().trajectories
p = Pipeline([
('diheds', DihedralFeaturizer(['phi', 'psi'], sincos=False)),
('hmm', VonMisesHMM(n_states=4))
])
predict = p.fit_predict(trajs)
p.named_steps['hmm'].summarize()
def test_pickle():
"""Test pickling an HMM"""
trajectories = AlanineDipeptide().get_cached().trajectories
topology = trajectories[0].topology
indices = topology.select('symbol C or symbol O or symbol N')
featurizer = DihedralFeaturizer(['phi', 'psi'], trajectories[0][0])
sequences = featurizer.transform(trajectories)
hmm = VonMisesHMM(n_states=4, n_init=1)
hmm.fit(sequences)
logprob, hidden = hmm.predict(sequences)
with tempfile.TemporaryFile() as savefile:
pickle.dump(hmm, savefile)
savefile.seek(0, 0)
hmm2 = pickle.load(savefile)
logprob2, hidden2 = hmm2.predict(sequences)
assert(logprob == logprob2) | lgpl-2.1 |
ChinaQuants/zipline | zipline/data/treasuries.py | 9 | 3406 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import itemgetter
import re
import numpy as np
import pandas as pd
get_unit_and_periods = itemgetter('unit', 'periods')
def parse_treasury_csv_column(column):
"""
Parse a treasury CSV column into a more human-readable format.
Columns start with 'RIFLGFC', followed by Y or M (year or month), followed
by a two-digit number signifying number of years/months, followed by _N.B.
We only care about the middle two entries, which we turn into a string like
3month or 30year.
"""
column_re = re.compile(
r"^(?P<prefix>RIFLGFC)"
"(?P<unit>[YM])"
"(?P<periods>[0-9]{2})"
"(?P<suffix>_N.B)$"
)
match = column_re.match(column)
if match is None:
raise ValueError("Couldn't parse CSV column %r." % column)
unit, periods = get_unit_and_periods(match.groupdict())
# Roundtrip through int to coerce '06' into '6'.
return str(int(periods)) + ('year' if unit == 'Y' else 'month')
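# For example (illustrative column names, not an exhaustive list):
#   parse_treasury_csv_column('RIFLGFCM03_N.B')  ->  '3month'
#   parse_treasury_csv_column('RIFLGFCY10_N.B')  ->  '10year'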
def earliest_possible_date():
"""
The earliest date for which we can load data from this module.
"""
# The US Treasury actually has data going back further than this, but it's
# pretty rare to find pricing data going back that far, and there's no
# reason to make people download benchmarks back to 1950 that they'll never
# be able to use.
return pd.Timestamp('1980', tz='UTC')
def get_treasury_data(start_date, end_date):
return pd.read_csv(
"http://www.federalreserve.gov/datadownload/Output.aspx"
"?rel=H15"
"&series=bf17364827e38702b42a58cf8eaa3f78"
"&lastObs="
"&from=" # An unbounded query is ~2x faster than specifying dates.
"&to="
"&filetype=csv"
"&label=omit"
"&layout=seriescolumn"
"&type=package",
skiprows=1, # First row is a useless header.
parse_dates=['Time Period'],
na_values=['ND'], # Presumably this stands for "No Data".
index_col=0,
).loc[
start_date:end_date
].dropna(
how='all'
).rename(
columns=parse_treasury_csv_column
).tz_localize('UTC') * 0.01 # Convert from 2.57% to 0.0257.
def dataconverter(s):
try:
return float(s) / 100
except:
return np.nan
def get_daily_10yr_treasury_data():
"""Download daily 10 year treasury rates from the Federal Reserve and
return a pandas.Series."""
url = "http://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \
"&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \
"&filetype=csv&label=include&layout=seriescolumn"
return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'],
parse_dates=True, converters={1: dataconverter},
squeeze=True)
| apache-2.0 |
rexshihaoren/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
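# For reference: the isotonic fit below solves the constrained least-squares
# problem
#     minimize sum_i (y_i - y_hat_i)**2   subject to   y_hat_1 <= ... <= y_hat_n,
# typically via the pool-adjacent-violators algorithm, while the linear fit is
# an ordinary least-squares line shown for comparison.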
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
twankim/weaksemi | main_local.py | 1 | 8601 | # -*- coding: utf-8 -*-
# @Author: twankim
# @Date: 2017-02-24 17:46:51
# @Last Modified by: twankim
# @Last Modified time: 2018-03-09 22:14:15
import numpy as np
import time
import sys
import os
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from ssac import weakSSAC
from gen_data import genData
from utils import *
weak = "local"
delta = 0.99
base_dir= os.path.join('./results',weak)
def main(args):
plotted = False
rep = args.rep
k = args.k
n = args.n
m = args.m
std = args.std
# qs = [float(q) for q in args.qs.split(',')]
etas = [float(eta) for eta in args.etas.split(',')]
beta = args.beta
i_plot = np.random.randint(0,rep) # Index of experiment to plot the figure
verbose = args.verbose
cs = [float(q) for q in args.cs.split(',')]
res_acc = np.zeros((rep,len(cs),len(etas))) # Accuracy of clustering
res_mean_acc = np.zeros((rep,len(cs),len(etas))) # Mean accuracy of clustering (per cluster)
# res_err = np.zeros((rep,len(qs),len(etas))) # Number of misclustered points
res_fail = np.zeros((rep,len(cs),len(etas))) # Number of Failure
gammas = np.zeros(rep)
nus = np.zeros((rep,len(cs)))
rhos = np.zeros((rep,len(cs)))
# Make directories to save results
if not os.path.exists(base_dir):
os.makedirs(base_dir)
res_dir = base_dir + '/{}_{}'.format(args.min_gamma,args.max_gamma)
if not os.path.exists(res_dir):
os.makedirs(res_dir)
for i_rep in xrange(rep):
# Generate Synthetic data
# m dimensional, n points, k clusters
# min_gamma: minimum gamma margin
if verbose:
print "({}/{})... Generating data".format(i_rep+1,rep)
dataset = genData(n,m,k,args.min_gamma,args.max_gamma,std)
X,y_true,ris = dataset.gen()
gamma = dataset.gamma
gammas[i_rep] = gamma
print "({}/{})... Synthetic data is generated: gamma={}, (n,m,k,std)=({},{},{},{})".format(
i_rep+1,rep,gamma,n,m,k,std)
algo = weakSSAC(X,y_true,k,wtype=weak,ris=ris)
# Test SSAC algorithm for different c's and eta's (fix beta in this case)
for i_c,c_dist in enumerate(cs):
assert (c_dist>0.5) & (c_dist<=1.0), "c_dist must be in (0.5,1]"
nus[i_rep,i_c] = float(gamma) + 1.5*(1-c_dist)
rhos[i_rep,i_c] = c_dist
# Calculate proper eta and beta based on parameters including delta
if verbose:
print " - Proper eta={}, beta={} (delta={})".format(
dataset.calc_eta(delta,weak=weak,nu=nus[i_rep,i_c],rho=rhos[i_rep,i_c]),
dataset.calc_beta(delta,weak=weak,nu=nus[i_rep,i_c],rho=rhos[i_rep,i_c]),
delta)
for i_eta,eta in enumerate(etas):
if verbose:
print " <Test: c_dist={}, eta={}, beta={}>".format(c_dist,eta,beta)
algo.set_params(eta,beta,rho=rhos[i_rep,i_c],nu=nus[i_rep,i_c])
if not algo.fit():
# Algorithm has failed
res_fail[i_rep,i_c,i_eta] = 1
if not plotted:
i_plot = np.random.randint(i_rep+1,rep) # Index of experiment to plot the figure
y_pred = algo.y
mpps = algo.mpps # Estimated cluster centers
# print " ... Clustering is done. Number of binary search steps = {}\n".format(algo.bs_num)
# For evaluation & plotting, find best permutation of cluster assignment
y_pred_perm = find_permutation(dataset,algo)
# Calculate accuracy and mean accuracy
res_acc[i_rep,i_c,i_eta] = accuracy(y_true,y_pred_perm)
res_mean_acc[i_rep,i_c,i_eta] = mean_accuracy(y_true,y_pred_perm)
# # Calculate number of errors
# res_err[i_rep,i_c,i_eta] = error(y_true,y_pred_perm)
if (i_rep == i_plot) and (m<=2) and (not plotted):
if (i_eta==len(etas)-1) and (i_c==len(cs)-1):
plotted = True
title = r"SSAC with {} weak oracle ($\eta={}, \beta={}, \nu={:.2f}, \rho={:.2f}$)".format(
weak,eta,beta,nus[i_rep,i_c],rhos[i_rep,i_c])
f_name = res_dir+'/fig_n{}_m{}_k{}_c{:03d}_e{:d}.png'.format(n,m,k,int(100*c_dist),int(eta))
plot_cluster(X,y_true,y_pred_perm,k,mpps,gamma,
title,f_name,verbose)
# Write result as table
print_eval("Accuracy(%)",res_acc,etas,
res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("acc",n,m,k),weak=weak,params=cs)
print_eval("Mean Accuracy(%)",res_mean_acc,etas,
res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("meanacc",n,m,k),weak=weak,params=cs)
# print_eval("# Error(%)",res_err,qs,etas,
# res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("err",n,m,k))
print_eval("# Failures",res_fail,etas,
res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("fail",n,m,k),
is_sum=True,weak=weak,params=cs)
# if args.isplot:
# Plot Accuracy vs. eta
fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("acc",n,m,k)
plot_eval("Accuracy(%)",res_acc,etas,fig_name,weak=weak,params=cs)
# Plot Mean Accuracy vs. eta
fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("meanacc",n,m,k)
plot_eval("Mean Accuracy(%)",res_mean_acc,etas,fig_name,weak=weak,params=cs)
# Plot Failure vs. eta
fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("fail",n,m,k)
plot_eval("# Failures",res_fail,etas,fig_name,is_sum=True,weak=weak,params=cs)
# Plot histogram of gammas
fig_name = res_dir+'/fig_gamma_hist.pdf'
plot_hist(gammas,args.min_gamma,args.max_gamma,fig_name)
if args.isplot:
plt.show()
def parse_args():
def str2bool(v):
return v.lower() in ('true', '1')
parser = argparse.ArgumentParser(description=
'Test Semi-Supervised Active Clustering with Weak Oracles: Local-weak model')
parser.add_argument('-rep', dest='rep',
help='Number of experiments to repeat',
default = 10000, type = int)
parser.add_argument('-k', dest='k',
help='Number of clusters in synthetic data',
default = 3, type = int)
parser.add_argument('-n', dest='n',
help='Number of data points in synthetic data',
default = 600, type = int)
parser.add_argument('-m', dest='m',
help='Dimension of data points in synthetic data',
default = 2, type = int)
parser.add_argument('-std', dest='std',
help='standard deviation of Gaussian distribution (default:1.5)',
default = 2.0, type = float)
parser.add_argument('-qs', dest='qs',
help='Probabilities q (not-sure with 1-q) ex) 0.7,0.85,1',
default = '0.7,0.85,1', type = str)
parser.add_argument('-etas', dest='etas',
help='etas: parameter for sampling (phase 1) ex) 10,50',
default = '2,5,10,20,30', type = str)
parser.add_argument('-beta', dest='beta',
help='beta: parameter for sampling (phase 2)',
default = 1, type = int)
parser.add_argument('-g_min', dest='min_gamma',
help='minimum gamma margin (default:1)',
default = 1.0, type = float)
parser.add_argument('-g_max', dest='max_gamma',
help='maximum gamma margin (default:1.1)',
default = 1.1, type = float)
parser.add_argument('-cs', dest='cs',
help='Fractions to set distance-weak parameters (0.5,1] ex) 0.7,0.85,1',
default = '0.6,0.8,1', type = str)
parser.add_argument('-isplot', dest='isplot',
help='plot the result: True/False',
default = False, type = str2bool)
parser.add_argument('-verbose', dest='verbose',
help='verbose: True/False',
default = False, type = str2bool)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print "Called with args:"
print args
sys.exit(main(args))
| mit |
fengzhyuan/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/extension/test_categorical.py | 1 | 9395 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import string
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical, CategoricalIndex, Timestamp
import pandas._testing as tm
from pandas.api.types import CategoricalDtype
from pandas.tests.extension import base
def make_data():
while True:
values = np.random.choice(list(string.ascii_letters), size=100)
# ensure we meet the requirements
# 1. first two not null
# 2. first and second are different
if values[0] != values[1]:
break
return values
@pytest.fixture
def dtype():
return CategoricalDtype()
@pytest.fixture
def data():
"""Length-100 array for this type.
* data[0] and data[1] should both be non missing
* data[0] and data[1] should not be equal
"""
return Categorical(make_data())
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return Categorical([np.nan, "A"])
@pytest.fixture
def data_for_sorting():
return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)
@pytest.fixture
def data_missing_for_sorting():
return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def data_for_grouping():
return Categorical(["a", "a", None, None, "b", "b", "a", "c"])
class TestDtype(base.BaseDtypeTests):
pass
class TestInterface(base.BaseInterfaceTests):
@pytest.mark.skip(reason="Memory usage doesn't match")
def test_memory_usage(self, data):
# Is this deliberate?
super().test_memory_usage(data)
def test_contains(self, data, data_missing):
# GH-37867
# na value handling in Categorical.__contains__ is deprecated.
# See base.BaseInterFaceTests.test_contains for more details.
na_value = data.dtype.na_value
# ensure data without missing values
data = data[~data.isna()]
# first elements are non-missing
assert data[0] in data
assert data_missing[0] in data_missing
# check the presence of na_value
assert na_value in data_missing
assert na_value not in data
# Categoricals can contain other nan-likes than na_value
for na_value_obj in tm.NULL_OBJECTS:
if na_value_obj is na_value:
continue
assert na_value_obj not in data
assert na_value_obj in data_missing # this line differs from super method
class TestConstructors(base.BaseConstructorsTests):
pass
class TestReshaping(base.BaseReshapingTests):
@pytest.mark.xfail(reason="Deliberately upcast to object?")
def test_concat_with_reindex(self, data):
super().test_concat_with_reindex(data)
class TestGetitem(base.BaseGetitemTests):
@pytest.mark.skip(reason="Backwards compatibility")
def test_getitem_scalar(self, data):
# CategoricalDtype.type isn't "correct" since it should
# be a parent of the elements (object). But don't want
# to break things by changing.
super().test_getitem_scalar(data)
class TestSetitem(base.BaseSetitemTests):
pass
class TestMissing(base.BaseMissingTests):
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_pad(self, data_missing):
super().test_fillna_limit_pad(data_missing)
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_backfill(self, data_missing):
super().test_fillna_limit_backfill(data_missing)
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="Unobserved categories included")
def test_value_counts(self, all_data, dropna):
return super().test_value_counts(all_data, dropna)
def test_combine_add(self, data_repeated):
# GH 20825
# When adding categoricals in combine, result is a string
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 + x2)
expected = pd.Series(
[a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 + x2)
expected = pd.Series([a + val for a in list(orig_data1)])
self.assert_series_equal(result, expected)
@pytest.mark.skip(reason="Not Applicable")
def test_fillna_length_mismatch(self, data_missing):
super().test_fillna_length_mismatch(data_missing)
class TestCasting(base.BaseCastingTests):
@pytest.mark.parametrize("cls", [Categorical, CategoricalIndex])
@pytest.mark.parametrize("values", [[1, np.nan], [Timestamp("2000"), pd.NaT]])
def test_cast_nan_to_int(self, cls, values):
# GH 28406
s = cls(values)
msg = "Cannot (cast|convert)"
with pytest.raises((ValueError, TypeError), match=msg):
s.astype(int)
@pytest.mark.parametrize(
"expected",
[
pd.Series(["2019", "2020"], dtype="datetime64[ns, UTC]"),
pd.Series([0, 0], dtype="timedelta64[ns]"),
pd.Series([pd.Period("2019"), pd.Period("2020")], dtype="period[A-DEC]"),
pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval"),
pd.Series([1, np.nan], dtype="Int64"),
],
)
def test_cast_category_to_extension_dtype(self, expected):
# GH 28668
result = expected.astype("category").astype(expected.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype, expected",
[
(
"datetime64[ns]",
np.array(["2015-01-01T00:00:00.000000000"], dtype="datetime64[ns]"),
),
(
"datetime64[ns, MET]",
pd.DatetimeIndex(
[Timestamp("2015-01-01 00:00:00+0100", tz="MET")]
).array,
),
],
)
def test_consistent_casting(self, dtype, expected):
# GH 28448
result = Categorical(["2015-01-01"]).astype(dtype)
assert result == expected
class TestArithmeticOps(base.BaseArithmeticOpsTests):
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
# frame & scalar
op_name = all_arithmetic_operators
if op_name == "__rmod__":
request.node.add_marker(
pytest.mark.xfail(
reason="rmod never called when string is first argument"
)
)
super().test_arith_frame_with_scalar(data, op_name)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):
op_name = all_arithmetic_operators
if op_name == "__rmod__":
request.node.add_marker(
pytest.mark.xfail(
reason="rmod never called when string is first argument"
)
)
super().test_arith_series_with_scalar(data, op_name)
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with pytest.raises(TypeError, match="cannot perform|unsupported operand"):
ser + data
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
if op_name == "__eq__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x == y)
assert (result == expected).all()
elif op_name == "__ne__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x != y)
assert (result == expected).all()
else:
msg = "Unordered Categoricals can only compare equality or not"
with pytest.raises(TypeError, match=msg):
op(data, other)
@pytest.mark.parametrize(
"categories",
[["a", "b"], [0, 1], [Timestamp("2019"), Timestamp("2020")]],
)
def test_not_equal_with_na(self, categories):
# https://github.com/pandas-dev/pandas/issues/32276
c1 = Categorical.from_codes([-1, 0], categories=categories)
c2 = Categorical.from_codes([0, 1], categories=categories)
result = c1 != c2
assert result.all()
class TestParsing(base.BaseParsingTests):
pass
| bsd-3-clause |
FRidh/python-acoustics | acoustics/aio.py | 1 | 3402 | from __future__ import unicode_literals
import csv
import io
import re
import pandas as pd
def read_csv_cirrus(filename):
"""Read a Cirrus CSV file. Currently exists support for some types of
CSV files extracted with NoiseTools. There is no support for CSVs related
with occupational noise.
If there are NC and NR values in the csv file, they will be stored in the
returned object with attributes ``nc`` and ``nr``. If the CSV file contains
time history, you can access to date and time with the ``time`` attribute.
Also, it is possible to know the integration time with the
``integration_time`` attribute.
:param filename: CSV file name.
:returns: Pandas dataframe with all data extracted from the CSV file.
:rtype: Pandas dataframe.
"""
with open(filename, "r") as csvfile:
csvreader = csvfile.read()
csvreader = re.sub(r" dB", "", csvreader) # Clean " dB" from data
dialect = csv.Sniffer().sniff(csvreader, delimiters=",;")
separator = dialect.delimiter
# Guess decimal separator
decimal_sep = re.search(r"\"\d{2,3}"
r"(\.|,)" # Decimal separator
r"\d{1,2}\"",
csvreader).group(1)
n_cols = re.search("(.+)\n", csvreader).group(1).count(separator) + 1
if n_cols < 5:
unsorted_data = []
pdindex = ["Z"]
for i, c in enumerate(csvreader.splitlines()):
if c[:4] == '"NR"':
nr = int(re.search(r"\d{2}", c).group(0))
continue
elif c[:4] == '"NC"':
nc = int(re.search(r"\d{2}", c).group(0))
continue
if i != 0:
unsorted_data.append(c.split(separator))
else:
if n_cols == 3:
pdindex.append(c[-2:-1])
elif n_cols == 4:
pdindex.append("A")
pdindex.append("C")
# Create a sorted temporary csv-like file
csv_data = list(zip(*unsorted_data))
temp_csv = ""
for row in csv_data:
temp_csv += separator.join(row) + "\n"
# Then, read it with pandas
data = pd.read_csv(io.StringIO(temp_csv), sep=separator,
decimal=decimal_sep)
# Assign NC and NR data if they are present
try:
data.nc = nc
data.nr = nr
except:
pass
# If the csv file contains global data from the "Details" tab in
# NoiseTools, skip row names
if n_cols != 2:
data.index = pdindex
else:
data = pd.read_csv(filename, parse_dates=[[0, 1]], sep=separator,
decimal=decimal_sep)
# Fix time name column
en_columns = data.columns.values
en_columns[0] = "time"
data.columns = en_columns
# Guess integration time with statistical mode because the csv could
# have been cleaned from unwanted noise
data["time"] = pd.to_datetime(data.time)
delta = data.time.diff().fillna(0)
# Mode and change from ns to s
int_time = int(delta.mode().astype(int) * 1e-9)
if round(int_time, 2) == 0.06: # Fix for 1/16 s
int_time = 0.0625
data.integration_time = int_time
return data
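# Minimal usage sketch (the file name below is an assumption, not part of this
# module):
#
#   data = read_csv_cirrus('noise_measurement.csv')
#   data.nc, data.nr              # only present when the CSV contains NC/NR rows
#   data.time, data.integration_time  # only present for time-history exports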
| bsd-3-clause |
marionleborgne/nupic.research | projects/sequence_prediction/continuous_sequence/data/processSantaFeDataset.py | 13 | 2104 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import pandas as pd
import csv
def convertDatToCSV(inputFileName, outputFileName, Nrpts=1, maxLength=None):
df = pd.read_table(inputFileName, header=None, names=['value'])
if maxLength is None:
maxLength = len(df)
outputFile = open(outputFileName,"w")
csvWriter = csv.writer(outputFile)
if Nrpts==1:
csvWriter.writerow(['step', 'data'])
csvWriter.writerow(['float', 'float'])
csvWriter.writerow(['', ''])
for i in xrange(maxLength):
csvWriter.writerow([i,df['value'][i]])
else:
csvWriter.writerow(['step', 'data', 'reset'])
csvWriter.writerow(['float', 'float', 'int'])
csvWriter.writerow(['', '', 'R'])
for _ in xrange(Nrpts):
for i in xrange(maxLength):
if i==0:
reset = 1
else:
reset = 0
csvWriter.writerow([i, df['value'][i], reset])
outputFile.close()
inputFileName = 'SantaFe_A_cont.dat'
outputFileName = 'SantaFe_A_cont.csv'
convertDatToCSV(inputFileName, outputFileName, maxLength=100)
inputFileName = 'SantaFe_A.dat'
outputFileName = 'SantaFe_A.csv'
convertDatToCSV(inputFileName, outputFileName, Nrpts=1)
| agpl-3.0 |
ajrichards/bayesian-examples | munging/missing-values/imputation-improves-classification.py | 1 | 2206 | """
http://scikit-learn.org/stable/auto_examples/plot_missing_values.html#sphx-glr-auto-examples-plot-missing-values-py
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
sammosummo/sammosummo.github.io | assets/scripts/neals-funnel-c.py | 1 | 1700 | """Generate data and sample from Neal's funnel distribution.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pymc3 as pm
from scipy.stats import norm
def main():
with pm.Model():
# set up figure
fs = rcParams["figure.figsize"]
rcParams["figure.figsize"] = (fs[0], fs[0] / 2)
rcParams["lines.linewidth"] = 2
rcParams["font.size"] = 14
# simulate data
np.random.seed(0)
k = 9
n = 10000
v = norm.rvs(0, 3, n)
x = norm.rvs(0, np.exp(v / 2), (k, n))
# set up model
v_ = pm.Normal("v", mu=0, sd=3)
xt_ = pm.Normal("xt", mu=0, sd=1, shape=k)
x_ = pm.Deterministic("x", pm.math.exp(v_ / 2) * xt_)
# sample and save samples
trace = pm.sample(n, chains=1)
v_samples = trace["v"][:]
xt_samples = trace["xt"][:].T
x_samples = trace["x"][:].T
# plot samples
# plot simulated data
fig, axes = plt.subplots(1, 2, constrained_layout=True)
ax = axes[0]
ax.scatter(
xt_samples[0], v_samples, marker=".", alpha=0.05, rasterized=True, color="r"
)
ax.set_xlim(-3.5, 3.5)
ax.set_ylim(-9, 9)
ax.set_xlabel(r"$\tilde{x}_0$")
ax.set_ylabel("$v$")
ax = axes[1]
ax.scatter(
x_samples[0], v_samples, marker=".", alpha=0.05, rasterized=True, color="r"
)
ax.set_xlabel("$x_0$")
ax.set_xlim(-20, 20)
ax.set_ylim(-9, 9)
# save
plt.savefig("../images/neals-funnel-c.svg", bbox_inches=0, transparent=True)
if __name__ == "__main__":
main()
| mit |
dvro/imbalanced-learn | examples/pipeline/plot_pipeline_classification.py | 3 | 1421 | """
=========================
Pipeline Object
=========================
An example of the Pipeline object working with transformers and resamplers.
"""
print(__doc__)
from sklearn.cross_validation import train_test_split as tts
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import classification_report
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import EditedNearestNeighbours
from imblearn.under_sampling import RepeatedEditedNearestNeighbours
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=1.25, weights=[0.3, 0.7],
n_informative=3, n_redundant=1, flip_y=0,
n_features=5, n_clusters_per_class=1,
n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Create the samplers
enn = EditedNearestNeighbours()
renn = RepeatedEditedNearestNeighbours()
# Create the classifier
knn = KNN(1)
# Make the splits
X_train, X_test, y_train, y_test = tts(X, y, random_state=42)
# Add one transformer and two samplers to the pipeline object
pipeline = make_pipeline(pca, enn, renn, knn)
pipeline.fit(X_train, y_train)
y_hat = pipeline.predict(X_test)
print(classification_report(y_test, y_hat))
| mit |
kubeflow/examples | github_issue_summarization/pipelines/components/t2t/t2t-app/app/main.py | 1 | 5290 | # -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import os
import random
import re
import requests
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import g, request
import pandas as pd
import tensorflow as tf
# similar to T2T's query.py
# https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/serving/query.py
from tensor2tensor import problems as problems_lib # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir
from tensor2tensor.serving import serving_utils
app = Flask(__name__)
model_name = os.getenv('MODEL_NAME', 'ghsumm')
problem_name = os.getenv('PROBLEM_NAME', 'gh_problem')
t2t_usr_dir = os.getenv('T2T_USR_DIR', 'ghsumm/trainer')
hparams_name = os.getenv('HPARAMS', 'transformer_prepend')
data_dir = os.getenv('DATADIR', 'gs://aju-dev-demos-codelabs/kubecon/t2t_data_gh_all/')
github_token = os.getenv('GH_TOKEN', 'xxx')
SERVER = os.getenv('TFSERVING_HOST', 'ghsumm.kubeflow')
print("using server: %s" % SERVER)
SERVABLE_NAME = os.getenv('TF_SERVABLE_NAME', 'ghsumm')
print("using model servable name: %s" % SERVABLE_NAME)
SAMPLE_ISSUES = './github_issues_sample.csv'
SERVER_URL = 'http://' + SERVER + ':8500/v1/models/' + SERVABLE_NAME + ':predict'
def get_issue_body(issue_url):
issue_url = re.sub('.*github.com/', 'https://api.github.com/repos/',
issue_url)
tf.logging.info("issue url: %s", issue_url)
# tf.logging.info("using GH token: %s", github_token)
response = requests.get(
issue_url, headers={
'Authorization': 'token {}'.format(github_token)
}).json()
tf.logging.info("----response from url fetch: %s", response)
return response['body']
@app.route('/')
def index():
return render_template('index.html')
@app.route("/random_github_issue", methods=['GET'])
def random_github_issue():
github_issues = getattr(g, '_github_issues', None)
if github_issues is None:
github_issues = g._github_issues = pd.read_csv(
SAMPLE_ISSUES).body.tolist()
random_issue = github_issues[random.randint(0,
len(github_issues) - 1)]
tf.logging.info("----random issue text: %s", random_issue)
return jsonify({'body': random_issue})
@app.route("/summary", methods=['POST'])
def summary():
"""Main prediction route.
Provides a machine-generated summary of the given text. Sends a request to a live
model trained on GitHub issues.
"""
global problem #pylint: disable=global-statement
if problem is None:
init()
request_fn = make_tfserving_rest_request_fn()
if request.method == 'POST':
issue_text = request.form["issue_text"]
issue_url = request.form["issue_url"]
if issue_url:
print("fetching issue from URL...")
issue_text = get_issue_body(issue_url)
tf.logging.info("issue_text: %s", issue_text)
outputs = serving_utils.predict([issue_text], problem, request_fn)
outputs, = outputs
output, score = outputs #pylint: disable=unused-variable
tf.logging.info("output: %s", output)
return jsonify({'summary': output, 'body': issue_text})
return ('', 204)
problem = None
def init():
# global input_encoder, output_decoder, fname, problem
global problem #pylint: disable=global-statement
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info("importing ghsumm/trainer from {}".format(t2t_usr_dir))
usr_dir.import_usr_dir(t2t_usr_dir)
print(t2t_usr_dir)
problem = registry.problem(problem_name)
hparams = tf.contrib.training.HParams(data_dir=os.path.expanduser(data_dir))
problem.get_hparams(hparams)
def make_tfserving_rest_request_fn():
"""Wraps function to make CloudML Engine requests with runtime args."""
def _make_tfserving_rest_request_fn(examples):
"""..."""
# api = discovery.build("ml", "v1", credentials=credentials)
# parent = "projects/%s/models/%s/versions/%s" % (cloud.default_project(),
# model_name, version)
input_data = {
"instances": [{
"input": {
"b64": base64.b64encode(ex.SerializeToString())
}
} for ex in examples]
}
response = requests.post(SERVER_URL, json=input_data)
predictions = response.json()['predictions']
tf.logging.info("Predictions: %s", predictions)
return predictions
return _make_tfserving_rest_request_fn
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
app.run(port=8080, debug=True)
| apache-2.0 |
codrut3/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 67 | 5155 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
"""Trains, evaluates, and exports a multivariate model."""
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=[], num_features=5)
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Larger window sizes generally produce a better covariance matrix.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
values = [current_state["observed"]]
times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
# Export the model so we can do iterative prediction and filtering without
# reloading model checkpoints.
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_savedmodel(
export_directory, input_receiver_fn)
with tf.Graph().as_default():
numpy.random.seed(1) # Make the example a bit more deterministic
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
for _ in range(100):
current_prediction = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=current_state, signatures=signatures,
session=session, steps=1))
next_sample = numpy.random.multivariate_normal(
# Squeeze out the batch and series length dimensions (both 1).
mean=numpy.squeeze(current_prediction["mean"], axis=[0, 1]),
cov=numpy.squeeze(current_prediction["covariance"], axis=[0, 1]))
# Update model state so that future predictions are conditional on the
# value we just sampled.
filtering_features = {
tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
tf.contrib.timeseries.FilteringResults.TIMES],
tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
None, None, :]}
current_state = (
tf.contrib.timeseries.saved_model_utils.filter_continuation(
continue_from=current_state,
session=session,
signatures=signatures,
features=filtering_features))
values.append(next_sample[None, None, :])
times.append(current_state["times"])
all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
return all_times, all_observations
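def print_sampled_correlations(all_times, all_observations, start_time=1000):
  """Optional add-on sketch, not part of the original example.
  Prints the empirical correlation matrix of the sampled portion of the
  series (times >= start_time); this is the correlation structure that
  main() below visualizes qualitatively, with 1000 matching its axvline.
  """
  sampled = all_observations[all_times >= start_time]
  print(numpy.corrcoef(sampled, rowvar=False))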
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
all_times, all_observations = multivariate_train_and_sample()
# Show where sampling starts on the plot
pyplot.axvline(1000, linestyle="dotted")
pyplot.plot(all_times, all_observations)
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
tosolveit/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
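# Optional diagnostic sketch (an addition, not part of the original example):
# count how many of the five available components each fitted model actually
# assigns points to, illustrating that the Dirichlet Process model only
# activates the components it needs.
for clf, name in [(gmm, 'GMM'), (dpgmm, 'Dirichlet Process GMM')]:
    n_used = len(np.unique(clf.predict(X)))
    print('%s assigns points to %d of %d components'
          % (name, n_used, clf.n_components))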
| bsd-3-clause |
shakamunyi/tensorflow | tensorflow/examples/learn/iris_run_config.py | 76 | 2565 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configurations by providing a RunConfig object to
# estimator to control session configurations, e.g. tf_random_seed.
run_config = tf.estimator.RunConfig().replace(tf_random_seed=1)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3,
config=run_config)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
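def print_confusion_matrix(y_true, y_pred):
  # Optional diagnostic sketch (an addition, not part of the original
  # example); e.g. call print_confusion_matrix(y_test, y_predicted) at the
  # end of main() above to inspect per-class errors.
  print(metrics.confusion_matrix(y_true, y_pred))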
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
PMitura/smiles-neural-network | baselines/rdkit_comp.py | 1 | 2737 | #! /usr/bin/env python
import db
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
from sets import Set
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem import SDWriter
from sklearn.feature_selection import VarianceThreshold
data = db.getTarget_206_1977()
SDF_FILE_MAX_LINES = 1000
# read db and write .sdf files split to 1000 mols per file
'''
sdf_name_suffix = '.sdf'
sdf_name_prefix = 'sdf_206_1977'
for step in range(int((len(data)+SDF_FILE_MAX_LINES-1)/SDF_FILE_MAX_LINES)):
lo = step*SDF_FILE_MAX_LINES
hi = min((step+1)*SDF_FILE_MAX_LINES,len(data))
sdf_name = sdf_name_prefix + '_' + str(step) + sdf_name_suffix
writer = SDWriter(sdf_name)
for i in range(lo, hi):
smiles = data[i][0]
mol = Chem.MolFromSmiles(smiles)
writer.write(mol)
writer.close()
'''
# MACCS sparse to dense representation
'''
df_data = {}
df_data['canonical_smiles'] = []
for i in range(166):
df_data['maccs'+str(i)] = []
rdkit_maccs_csv_files = ['rdkit_maccs_206_1977_0.csv', 'rdkit_maccs_206_1977_1.csv']
db_idx = 0
for fname in rdkit_maccs_csv_files:
file = open(fname)
lines = file.readlines()
for line in lines:
vals = Set([int(x) for x in line.split(',')])
df_data['canonical_smiles'].append(data[db_idx][0])
for i in range(166):
df_data['maccs'+str(i)].append(int(i in vals))
db_idx = db_idx+1
df = pd.DataFrame(df_data)
df.set_index('canonical_smiles', inplace=True)
print(df.describe())
df = df.loc[:,df.apply(pd.Series.nunique) != 1]
print(df.describe())
# print(df.to_csv('rdkit_maccs_206_1977_dense.csv'))
'''
# CDK Klekota-Roth sparse to dense representation
'''
df_data = {}
df_data['canonical_smiles'] = []
for i in range(4680):
df_data['kr'+str(i)] = []
kr_csv_files = ['cdk_kr_206_1977_0.csv', 'cdk_kr_206_1977_1.csv']
db_idx = 0
for fname in kr_csv_files:
file = open(fname)
lines = file.readlines()
for line in lines:
# print(db_idx,len(data))
df_data['canonical_smiles'].append(data[db_idx][0])
line = line.rstrip()
if len(line) !=0:
vals = Set([int(x) for x in line.split(',')])
for i in range(4680):
df_data['kr'+str(i)].append(int(i in vals))
else:
for i in range(4680):
df_data['kr'+str(i)].append(0)
db_idx = db_idx+1
df = pd.DataFrame(df_data)
df.set_index('canonical_smiles', inplace=True)
print(df.describe())
df = df.loc[:,df.apply(pd.Series.nunique) != 1]
print(df.describe())
print(df.to_csv('cdk_kr_206_1977_dense_pruned.csv'))
'''
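# Minimal in-memory MACCS sketch (an addition, not part of the original
# workflow): instead of round-tripping through .sdf/.csv files as in the
# disabled blocks above, a dense MACCS fingerprint for a single molecule can
# be computed directly with RDKit; the SMILES string is an arbitrary example.
example_mol = Chem.MolFromSmiles('CCO')
example_fp = rdMolDescriptors.GetMACCSKeysFingerprint(example_mol)
print('MACCS bits set for CCO: %s' % list(example_fp.GetOnBits()))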
| bsd-3-clause |
plowman/python-mcparseface | models/syntaxnet/tensorflow/tensorflow/contrib/learn/python/learn/tests/test_grid_search.py | 7 | 1877 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score, mean_squared_error
except ImportError:
HAS_SKLEARN = False
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
class GridSearchTest(tf.test.TestCase):
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=50)
grid_search = GridSearchCV(classifier,
{'hidden_units': [[5, 5], [10, 10]],
'learning_rate': [0.1, 0.01]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
datapythonista/pandas | pandas/core/ops/missing.py | 3 | 5186 | """
Missing data handling for arithmetic operations.
In particular, pandas conventions regarding division by zero differ
from numpy in the following ways:
1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2)
gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for
the remaining pairs
(the remaining being dtype1==dtype2==intN and dtype1==dtype2==uintN).
pandas convention is to return [-inf, nan, inf] for all dtype
combinations.
Note: the numpy behavior described here is py3-specific.
2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2)
gives precisely the same results as the // operation.
pandas convention is to return [nan, nan, nan] for all dtype
combinations.
3) divmod behavior consistent with 1) and 2).
"""
import operator
import numpy as np
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_scalar,
)
from pandas.core.ops import roperator
def fill_zeros(result, x, y):
"""
If this is a reversed op, then flip x,y
If we have an integer value (or array in y)
and we have 0's, fill them with np.nan,
return the result.
Mask the nan's from x.
"""
if is_float_dtype(result.dtype):
return result
is_variable_type = hasattr(y, "dtype") or hasattr(y, "type")
is_scalar_type = is_scalar(y)
if not is_variable_type and not is_scalar_type:
return result
if is_scalar_type:
y = np.array(y)
if is_integer_dtype(y.dtype):
if (y == 0).any():
# GH#7325, mask and nans must be broadcastable (also: GH#9308)
# Raveling and then reshaping makes np.putmask faster
mask = ((y == 0) & ~np.isnan(result)).ravel()
shape = result.shape
result = result.astype("float64", copy=False).ravel()
np.putmask(result, mask, np.nan)
result = result.reshape(shape)
return result
def mask_zero_div_zero(x, y, result):
"""
Set results of 0 // 0 to np.nan, regardless of the dtypes
of the numerator or the denominator.
Parameters
----------
x : ndarray
y : ndarray
result : ndarray
Returns
-------
ndarray
The filled result.
Examples
--------
>>> x = np.array([1, 0, -1], dtype=np.int64)
>>> x
array([ 1, 0, -1])
>>> y = 0 # int 0; numpy behavior is different with float
>>> result = x // y
>>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
"""
if not isinstance(result, np.ndarray):
# FIXME: SparseArray would raise TypeError with np.putmask
return result
if is_scalar(y):
y = np.array(y)
zmask = y == 0
if isinstance(zmask, bool):
# FIXME: numpy did not evaluate pointwise, seen in docs build
return result
if zmask.any():
# Flip sign if necessary for -0.0
zneg_mask = zmask & np.signbit(y)
zpos_mask = zmask & ~zneg_mask
nan_mask = zmask & (x == 0)
with np.errstate(invalid="ignore"):
neginf_mask = (zpos_mask & (x < 0)) | (zneg_mask & (x > 0))
posinf_mask = (zpos_mask & (x > 0)) | (zneg_mask & (x < 0))
if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
# Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
result = result.astype("float64", copy=False)
result[nan_mask] = np.nan
result[posinf_mask] = np.inf
result[neginf_mask] = -np.inf
return result
def dispatch_fill_zeros(op, left, right, result):
"""
Call fill_zeros with the appropriate fill value depending on the operation,
with special logic for divmod and rdivmod.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (np.ndarray for non-reversed ops)
right : object (np.ndarray for reversed ops)
result : ndarray
Returns
-------
result : np.ndarray
Notes
-----
For divmod and rdivmod, the `result` parameter and returned `result`
is a 2-tuple of ndarray objects.
"""
if op is divmod:
result = (
mask_zero_div_zero(left, right, result[0]),
fill_zeros(result[1], left, right),
)
elif op is roperator.rdivmod:
result = (
mask_zero_div_zero(right, left, result[0]),
fill_zeros(result[1], right, left),
)
elif op is operator.floordiv:
# Note: no need to do this for truediv; in py3 numpy behaves the way
# we want.
result = mask_zero_div_zero(left, right, result)
elif op is roperator.rfloordiv:
# Note: no need to do this for rtruediv; in py3 numpy behaves the way
# we want.
result = mask_zero_div_zero(right, left, result)
elif op is operator.mod:
result = fill_zeros(result, left, right)
elif op is roperator.rmod:
result = fill_zeros(result, right, left)
return result
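if __name__ == "__main__":
    # Minimal illustration (an addition, not part of pandas itself) of the
    # conventions described in the module docstring: integer // and % by zero
    # are mapped to inf/nan instead of numpy's all-zero results.
    x = np.array([-1, 0, 1], dtype=np.int64)
    y = np.array([0, 0, 0], dtype=np.int64)
    with np.errstate(divide="ignore", invalid="ignore"):
        print(mask_zero_div_zero(x, y, x // y))  # [-inf  nan  inf]
        print(fill_zeros(x % y, x, y))  # [nan nan nan]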
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/decomposition/nmf.py | 8 | 47008 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
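    Examples
    --------
    A shape-only usage sketch (added for illustration, not in the original
    docstring):
    >>> import numpy as np
    >>> X = np.abs(np.random.RandomState(0).randn(5, 4))
    >>> W, H = _initialize_nmf(X, n_components=2, init='random',
    ...                        random_state=0)
    >>> W.shape, H.shape
    ((5, 2), (2, 4))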
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(beta)
* np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
# L1 and L2 regularization
l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0
if regularization in ('both', 'components'):
alpha = float(alpha)
l1_H = l1_ratio * alpha
l2_H = (1. - l1_ratio) * alpha
if regularization in ('both', 'transformation'):
alpha = float(alpha)
l1_W = l1_ratio * alpha
l2_W = (1. - l1_ratio) * alpha
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,
shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,
shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
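    Examples
    --------
    A shape-only usage sketch (added for illustration, not in the original
    docstring):
    >>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    >>> W, H, n_iter = non_negative_factorization(X, n_components=2,
    ...     init='random', random_state=0)
    >>> W.shape, H.shape
    ((6, 2), (2, 2))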
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, six.integer_types) or n_components <= 0:
raise ValueError("Number of components must be positive;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, numbers.Number) or max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom':
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
def inverse_transform(self, W):
"""
Parameters
----------
W: {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed Data matrix
Returns
-------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
| bsd-3-clause |
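A minimal, hedged NumPy sketch of the regularized objective spelled out in the NMF / ProjectedGradientNMF docstring above; the helper name `nmf_objective` and the toy matrices are assumptions made for illustration only, not part of scikit-learn.
import numpy as np

def nmf_objective(X, W, H, alpha=0.0, l1_ratio=0.0):
    """Evaluate 0.5*||X - WH||_Fro^2 plus the documented L1/L2 penalties."""
    residual = X - W.dot(H)
    loss = 0.5 * np.sum(residual ** 2)                     # 0.5 * ||X - WH||_Fro^2
    l1 = alpha * l1_ratio * (np.abs(W).sum() + np.abs(H).sum())
    l2 = 0.5 * alpha * (1 - l1_ratio) * (np.sum(W ** 2) + np.sum(H ** 2))
    return loss + l1 + l2

if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 4))
    W = np.abs(rng.randn(6, 2))
    H = np.abs(rng.randn(2, 4))
    print(nmf_objective(X, W, H, alpha=0.1, l1_ratio=0.5))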
ZENGXH/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
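A small, hedged sketch of how the x-axis of the plot above is built: each point is the L1 norm of the coefficient vector at one step of the LARS path, divided by the final (largest) L1 norm so the axis runs from 0 to 1. The tiny `coefs` array stands in for the (n_features, n_alphas) path and is fabricated for illustration.
import numpy as np

coefs = np.array([[0.0, 0.5, 1.0],
                  [0.0, 0.0, -2.0]])

xx = np.sum(np.abs(coefs.T), axis=1)   # L1 norm of the coefficients at each step
xx /= xx[-1]                           # normalize so the axis ends at 1.0
print(xx)                              # [ 0.          0.16666667  1.        ]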
ruleant/buildtime-trend | generate_trend.py | 1 | 2619 | #!/usr/bin/env python
# vim: set expandtab sw=4 ts=4:
'''
Generates a trend (graph) from the buildtimes in buildtimes.xml
Usage : generate_trend.py -h --mode=native,keen
Copyright (C) 2014 Dieter Adriaenssens <[email protected]>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
from buildtimetrend.tools import get_logger
from buildtimetrend.travis import load_travis_env_vars
from buildtimetrend.settings import Settings
from buildtimetrend.settings import process_argv
def generate_trend(argv):
'''
Generate trends from analysed buildtime data
'''
settings = Settings()
# load Travis environment variables and save them in settings
load_travis_env_vars()
# process command line arguments
process_argv(argv)
# run trend_keen() always,
# if $KEEN_PROJECT_ID variable is set (checked later), it will be executed
if settings.get_setting("mode_native") is True:
trend_native()
if settings.get_setting("mode_keen") is True:
trend_keen()
def trend_native():
'''
Generate native trend with matplotlib : chart in PNG format
'''
from buildtimetrend.trend import Trend
# use parameter for timestamps file and check if file exists
result_file = os.getenv('BUILD_TREND_OUTPUTFILE', 'trends/buildtimes.xml')
chart_file = os.getenv('BUILD_TREND_TRENDFILE', 'trends/trend.png')
trend = Trend()
if trend.gather_data(result_file):
logger = get_logger()
# log number of builds and list of buildnames
logger.info('Builds (%d) : %s', len(trend.builds), trend.builds)
logger.info('Stages (%d) : %s', len(trend.stages), trend.stages)
trend.generate(chart_file)
def trend_keen():
'''
Setup trends using Keen.io API
'''
from buildtimetrend.keenio import generate_overview_config_file
generate_overview_config_file(Settings().get_project_name())
if __name__ == "__main__":
generate_trend(sys.argv)
| gpl-3.0 |
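A hedged sketch of driving the script above from another process: the output locations come from the BUILD_TREND_OUTPUTFILE / BUILD_TREND_TRENDFILE environment variables read in trend_native(), and the mode flag follows the usage line in the module docstring. The exact option set accepted by process_argv is not shown here, so treat the command line as an assumption.
import os
import subprocess

# Point the native trend at custom locations (defaults are trends/buildtimes.xml
# and trends/trend.png, per trend_native above).
env = dict(os.environ,
           BUILD_TREND_OUTPUTFILE='build/buildtimes.xml',
           BUILD_TREND_TRENDFILE='build/trend.png')

# '--mode=native' follows the docstring's "--mode=native,keen" usage hint.
subprocess.call(['python', 'generate_trend.py', '--mode=native'], env=env)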
martydill/url_shortener | code/venv/lib/python2.7/site-packages/IPython/core/pylabtools.py | 4 | 12815 | # -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
from __future__ import print_function
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
from IPython.utils import py3compat
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'inline' : 'module://IPython.kernel.zmq.pylab.backend_inline'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
but the function continues processing further figures.
Parameters
----------
fig_nums : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy-to-remember convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
from matplotlib import rcParams
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = rcParams['savefig.dpi']
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = dict(
format=fmt,
facecolor=fig.get_facecolor(),
edgecolor=fig.get_edgecolor(),
dpi=dpi,
bbox_inches=bbox_inches,
)
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
w, h = _pngxy(pngdata)
metadata = dict(width=w//2, height=h//2)
return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pylab as pylab
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if pylab.draw_if_interactive.called:
pylab.draw()
pylab.draw_if_interactive.called = False
return mpl_execfile
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
----------
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
from matplotlib.figure import Figure
from IPython.kernel.zmq.pylab import backend_inline
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, py3compat.string_types):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.kernel.zmq.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If we have already had a gui active, we need it and inline are the
# ones allowed.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pylab as pylab
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Importing numpy as np and pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from IPython.kernel.zmq.pylab.backend_inline import InlineBackend
except ImportError:
return
from matplotlib import pyplot
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from IPython.kernel.zmq.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
# Save rcParams that will be overwritten
shell._saved_rcParams = dict()
for k in cfg.rc:
shell._saved_rcParams[k] = pyplot.rcParams[k]
# load inline_rc
pyplot.rcParams.update(cfg.rc)
else:
from IPython.kernel.zmq.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
pyplot.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
| mit |
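A self-contained, hedged sketch of the gui/backend bookkeeping used above: the reverse map is built with dict(zip(...)) over the forward table and then patched by hand for extra backend names that share a GUI. The tiny dict below mirrors a few entries of `backends` for illustration and is not imported from IPython.
# Forward map: gui name -> matplotlib backend (subset of the module's table).
backends = {'tk': 'TkAgg', 'qt4': 'Qt4Agg', 'wx': 'WXAgg'}

# Reverse map, then manual fixes, in the spirit of backend2gui above.
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['WX'] = 'wx'          # extra valid backend name mapping to wx support

print(backend2gui['Qt4Agg'])      # -> 'qt4'
print(backend2gui['WX'])          # -> 'wx'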
larsmans/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it lies in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
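A minimal, hedged sketch of what "explicit feature map approximation" means in the example above: inner products of RBFSampler features approximate the exact RBF kernel, and the approximation tightens as n_components grows. The array sizes and gamma below are arbitrary choices for illustration.
import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
gamma = 0.2

K_exact = rbf_kernel(X, gamma=gamma)            # exact kernel matrix
for n_components in (10, 100, 1000):
    Z = RBFSampler(gamma=gamma, n_components=n_components,
                   random_state=1).fit_transform(X)
    K_approx = np.dot(Z, Z.T)                   # kernel via explicit features
    print(n_components, np.abs(K_exact - K_approx).mean())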
Eigenstate/msmbuilder | msmbuilder/project_templates/analysis/gather-metadata-plot.py | 9 | 1969 | """Plot metadata info
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from msmbuilder.io import load_meta, render_meta
sns.set_style('ticks')
colors = sns.color_palette()
## Load
meta = load_meta()
## Histogram of trajectory lengths
def plot_lengths(ax):
lengths_ns = meta['nframes'] * (meta['step_ps'] / 1000)
ax.hist(lengths_ns)
ax.set_xlabel("Lenths / ns", fontsize=16)
ax.set_ylabel("Count", fontsize=16)
total_label = ("Total length: {us:.2f}"
.format(us=np.sum(lengths_ns) / 1000))
total_label += r" / $\mathrm{\mu s}$"
ax.annotate(total_label,
xy=(0.05, 0.95),
xycoords='axes fraction',
fontsize=18,
va='top',
)
## Pie graph
def plot_pie(ax):
lengths_ns = meta['nframes'] * (meta['step_ps'] / 1000)
sampling = lengths_ns.groupby(level=0).sum()
ax.pie(sampling,
shadow=True,
labels=sampling.index,
colors=sns.color_palette(),
)
ax.axis('equal')
## Box plot
def plot_boxplot(ax):
meta2 = meta.copy()
meta2['ns'] = meta['nframes'] * (meta['step_ps'] / 1000)
sns.boxplot(
x=meta2.index.names[0],
y='ns',
data=meta2.reset_index(),
ax=ax,
)
## Plot hist
fig, ax = plt.subplots(figsize=(7, 5))
plot_lengths(ax)
fig.tight_layout()
fig.savefig("lengths-hist.pdf")
# {{xdg_open('lengths-hist.pdf')}}
## Plot pie
fig, ax = plt.subplots(figsize=(7, 5))
plot_pie(ax)
fig.tight_layout()
fig.savefig("lengths-pie.pdf")
# {{xdg_open('lengths-pie.pdf')}}
## Plot box
fig, ax = plt.subplots(figsize=(7, 5))
plot_boxplot(ax)
fig.tight_layout()
fig.savefig("lengths-boxplot.pdf")
# {{xdg_open('lengths-boxplot.pdf')}}
## Save metadata as html table
render_meta(meta, 'meta.pandas.html')
| lgpl-2.1 |
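A hedged stand-in for the metadata the script above expects: load_meta() is assumed to yield a pandas DataFrame with 'nframes' and 'step_ps' columns, since those are all the plotting helpers use. The tiny frame and file name below are fabricated for illustration only.
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt

meta = pd.DataFrame({'nframes': [5000, 12000, 8000],
                     'step_ps': [2.0, 2.0, 2.0]},
                    index=pd.Index(['run-0', 'run-1', 'run-2'], name='traj'))

# Same conversion used in plot_lengths / plot_pie: frames * (ps per frame) / 1000 -> ns
lengths_ns = meta['nframes'] * (meta['step_ps'] / 1000)

fig, ax = plt.subplots(figsize=(7, 5))
ax.hist(lengths_ns)
ax.set_xlabel("Lengths / ns")
ax.set_ylabel("Count")
fig.savefig("lengths-hist-sketch.png")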
tunneff/tef | deps/boost_1_63_0/libs/metaparse/tools/benchmark/benchmark.py | 5 | 9313 | #!/usr/bin/python
"""Utility to benchmark the generated source files"""
# Copyright Abel Sinkovics ([email protected]) 2016.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import argparse
import os
import subprocess
import json
import math
import platform
import matplotlib
import random
import re
import time
import psutil
import PIL
matplotlib.use('Agg')
import matplotlib.pyplot # pylint:disable=I0011,C0411,C0412,C0413
def benchmark_command(cmd, progress):
"""Benchmark one command execution"""
full_cmd = '/usr/bin/time --format="%U %M" {0}'.format(cmd)
print '{0:6.2f}% Running {1}'.format(100.0 * progress, full_cmd)
(_, err) = subprocess.Popen(
['/bin/bash', '-c', full_cmd],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate('')
values = err.strip().split(' ')
if len(values) == 2:
try:
return (float(values[0]), float(values[1]))
except: # pylint:disable=I0011,W0702
pass # Handled by the code after the "if"
print err
raise Exception('Error during benchmarking')
def benchmark_file(
filename, compiler, include_dirs, (progress_from, progress_to),
iter_count):
"""Benchmark one file"""
time_sum = 0
mem_sum = 0
for nth_run in xrange(0, iter_count):
(time_spent, mem_used) = benchmark_command(
'{0} -std=c++11 {1} -c {2}'.format(
compiler,
' '.join('-I{0}'.format(i) for i in include_dirs),
filename
),
(
progress_to * nth_run + progress_from * (iter_count - nth_run)
) / iter_count
)
os.remove(os.path.splitext(os.path.basename(filename))[0] + '.o')
time_sum = time_sum + time_spent
mem_sum = mem_sum + mem_used
return {
"time": time_sum / iter_count,
"memory": mem_sum / (iter_count * 1024)
}
def compiler_info(compiler):
"""Determine the name + version of the compiler"""
(out, err) = subprocess.Popen(
['/bin/bash', '-c', '{0} -v'.format(compiler)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate('')
gcc_clang = re.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)')
for line in (out + err).split('\n'):
mtch = gcc_clang.search(line)
if mtch:
return mtch.group(1) + ' ' + mtch.group(2)
return compiler
def string_char(char):
"""Turn the character into one that can be part of a filename"""
return '_' if char in [' ', '~', '(', ')', '/', '\\'] else char
def make_filename(string):
"""Turn the string into a filename"""
return ''.join(string_char(c) for c in string)
def files_in_dir(path, extension):
"""Enumartes the files in path with the given extension"""
ends = '.{0}'.format(extension)
return (f for f in os.listdir(path) if f.endswith(ends))
def format_time(seconds):
"""Format a duration"""
minute = 60
hour = minute * 60
day = hour * 24
week = day * 7
result = []
for name, dur in [
('week', week), ('day', day), ('hour', hour),
('minute', minute), ('second', 1)
]:
if seconds > dur:
value = seconds // dur
result.append(
'{0} {1}{2}'.format(int(value), name, 's' if value > 1 else '')
)
seconds = seconds % dur
return ' '.join(result)
def benchmark(src_dir, compiler, include_dirs, iter_count):
"""Do the benchrmarking"""
files = list(files_in_dir(src_dir, 'cpp'))
random.shuffle(files)
started_at = time.time()
result = {}
for filename in files:
progress = len(result)
result[filename] = benchmark_file(
os.path.join(src_dir, filename),
compiler,
include_dirs,
(float(progress) / len(files), float(progress + 1) / len(files)),
iter_count
)
elapsed = time.time() - started_at
total = float(len(files) * elapsed) / len(result)
print 'Elapsed time: {0}, Remaining time: {1}'.format(
format_time(elapsed),
format_time(total - elapsed)
)
return result
def plot(values, mode_names, title, (xlabel, ylabel), out_file):
"""Plot a diagram"""
matplotlib.pyplot.clf()
for mode, mode_name in mode_names.iteritems():
vals = values[mode]
matplotlib.pyplot.plot(
[x for x, _ in vals],
[y for _, y in vals],
label=mode_name
)
matplotlib.pyplot.title(title)
matplotlib.pyplot.xlabel(xlabel)
matplotlib.pyplot.ylabel(ylabel)
if len(mode_names) > 1:
matplotlib.pyplot.legend()
matplotlib.pyplot.savefig(out_file)
def mkdir_p(path):
"""mkdir -p path"""
try:
os.makedirs(path)
except OSError:
pass
def configs_in(src_dir):
"""Enumerate all configs in src_dir"""
for filename in files_in_dir(src_dir, 'json'):
with open(os.path.join(src_dir, filename), 'rb') as in_f:
yield json.load(in_f)
def byte_to_gb(byte):
"""Convert bytes to GB"""
return byte / (1024.0 * 1024 * 1024)
def join_images(img_files, out_file):
"""Join the list of images into the out file"""
images = [PIL.Image.open(f) for f in img_files]
joined = PIL.Image.new(
'RGB',
(sum(i.size[0] for i in images), max(i.size[1] for i in images))
)
left = 0
for img in images:
joined.paste(im=img, box=(left, 0))
left = left + img.size[0]
joined.save(out_file)
def plot_temp_diagrams(config, results, temp_dir):
"""Plot temporary diagrams"""
display_name = {
'time': 'Compilation time (s)',
'memory': 'Compiler memory usage (MB)',
}
files = config['files']
img_files = []
for measured in ['time', 'memory']:
mpts = sorted(int(k) for k in files.keys())
img_files.append(os.path.join(temp_dir, '_{0}.png'.format(measured)))
plot(
{
m: [(x, results[files[str(x)][m]][measured]) for x in mpts]
for m in config['modes'].keys()
},
config['modes'],
display_name[measured],
(config['x_axis_label'], display_name[measured]),
img_files[-1]
)
return img_files
def plot_diagram(config, results, images_dir, out_filename):
"""Plot one diagram"""
img_files = plot_temp_diagrams(config, results, images_dir)
join_images(img_files, out_filename)
for img_file in img_files:
os.remove(img_file)
def plot_diagrams(results, configs, compiler, out_dir):
"""Plot all diagrams specified by the configs"""
compiler_fn = make_filename(compiler)
total = psutil.virtual_memory().total # pylint:disable=I0011,E1101
memory = int(math.ceil(byte_to_gb(total)))
images_dir = os.path.join(out_dir, 'images')
for config in configs:
out_prefix = '{0}_{1}'.format(config['name'], compiler_fn)
plot_diagram(
config,
results,
images_dir,
os.path.join(images_dir, '{0}.png'.format(out_prefix))
)
with open(
os.path.join(out_dir, '{0}.qbk'.format(out_prefix)),
'wb'
) as out_f:
qbk_content = """{0}
Measured on a {2} host with {3} GB memory. Compiler used: {4}.
[$images/metaparse/{1}.png [width 100%]]
""".format(config['desc'], out_prefix, platform.platform(), memory, compiler)
out_f.write(qbk_content)
def main():
"""The main function of the script"""
desc = 'Benchmark the files generated by generate.py'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--src',
dest='src_dir',
default='generated',
help='The directory containing the sources to benchmark'
)
parser.add_argument(
'--out',
dest='out_dir',
default='../../doc',
help='The output directory'
)
parser.add_argument(
'--include',
dest='include',
default='include',
help='The directory containing the headers for the benchmark'
)
parser.add_argument(
'--boost_headers',
dest='boost_headers',
default='../../../..',
help='The directory containing the Boost headers (the boost directory)'
)
parser.add_argument(
'--compiler',
dest='compiler',
default='g++',
help='The compiler to do the benchmark with'
)
parser.add_argument(
'--repeat_count',
dest='repeat_count',
type=int,
default=5,
help='How many times a measurement should be repeated.'
)
args = parser.parse_args()
compiler = compiler_info(args.compiler)
results = benchmark(
args.src_dir,
args.compiler,
[args.include, args.boost_headers],
args.repeat_count
)
plot_diagrams(results, configs_in(args.src_dir), compiler, args.out_dir)
if __name__ == '__main__':
main()
| lgpl-3.0 |
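A hedged sketch of the measurement contract in benchmark_command above: GNU time is asked for "%U %M" (user CPU seconds and peak resident set size in KB) and the two whitespace-separated fields on stderr are parsed back into floats. The sample string below is fabricated.
def parse_time_output(err):
    """Parse the stderr written by `/usr/bin/time --format="%U %M" ...`.

    Returns (user_seconds, max_resident_kb), mirroring the tuple that
    benchmark_command builds before benchmark_file averages the runs and
    converts memory to MB.
    """
    values = err.strip().split(' ')
    if len(values) != 2:
        raise ValueError('unexpected /usr/bin/time output: %r' % err)
    return float(values[0]), float(values[1])

print(parse_time_output("1.84 215044\n"))   # -> (1.84, 215044.0)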
Clyde-fare/scikit-learn | sklearn/preprocessing/tests/test_data.py | 71 | 38516 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
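# A small, hedged sketch (not a collected test) of the column layout that
# test_polynomial_features relies on: for two features x1, x2 and degree 2,
# PolynomialFeatures orders the output as [1, x1, x2, x1^2, x1*x2, x2^2], and
# interaction_only keeps columns 0, 1, 2 and 4 of that layout.
def _polynomial_features_layout_sketch():
    X_demo = np.array([[2., 3.]])
    full = PolynomialFeatures(degree=2, include_bias=True).fit_transform(X_demo)
    inter = PolynomialFeatures(degree=2, interaction_only=True,
                               include_bias=True).fit_transform(X_demo)
    print(full)    # [[ 1.  2.  3.  4.  6.  9.]]  -> 1, x1, x2, x1^2, x1*x2, x2^2
    print(inter)   # [[ 1.  2.  3.  6.]]          -> columns 0, 1, 2 and 4 above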
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
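# A hedged sketch (not a collected test) of the arithmetic behind the
# MinMaxScaler assertions above: each column is mapped as
# (x - col_min) / (col_max - col_min) and then rescaled into feature_range.
# The toy data is fabricated for illustration.
def _min_max_formula_sketch():
    X_demo = np.array([[1., 10.],
                       [2., 20.],
                       [4., 30.]])
    feature_range = (1, 2)
    col_min, col_max = X_demo.min(axis=0), X_demo.max(axis=0)
    X_std = (X_demo - col_min) / (col_max - col_min)          # maps into [0, 1]
    X_scaled = X_std * (feature_range[1] - feature_range[0]) + feature_range[0]
    print(X_scaled.min(axis=0), X_scaled.max(axis=0))         # [ 1.  1.] [ 2.  2.]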
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
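# A hedged sketch (not a collected test) of the center/scale the RobustScaler
# tests assert on: subtract the per-column median and divide by the
# interquartile range, computed here directly with numpy.
def _robust_scale_formula_sketch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(20, 3)
    median = np.median(X_demo, axis=0)
    q = np.percentile(X_demo, q=(25, 75), axis=0)
    iqr = q[1] - q[0]
    X_scaled = (X_demo - median) / iqr
    print(np.median(X_scaled, axis=0))   # approximately zero per column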
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
# here were calculated to fit the quantiles produced by np.percentile
# using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l1', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='max', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
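# Layout sketch for the assertions above (a reading aid, not extra tests):
# with X = [[3, 2, 1], [0, 1, 1]] the encoder discovers n_values_ = [4, 3, 2],
# so feature_indices_ = [0, 4, 7, 9] (cumulative column offsets). Feature 0
# takes values {0, 3} -> columns 0 and 3, feature 1 takes {1, 2} -> columns 5
# and 6, feature 2 takes {1} -> column 8, which is exactly
# np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0] = [0, 3, 5, 6, 8], i.e.
# active_features_; only those 5 columns are kept, hence the (2, 5) shape.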
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
liangz0707/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labelings increases significantly as the number of clusters gets
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
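# A quick illustrative check of the idea above (small, runs in negligible
# time): two independent random labelings score roughly 0.0 on the adjusted
# Rand index, but clearly above 0.0 on the non-adjusted V-measure.
rng = np.random.RandomState(0)
labels_a = rng.randint(0, 10, size=100)
labels_b = rng.randint(0, 10, size=100)
print("ARI for 2 random labelings: %0.3f"
      % metrics.adjusted_rand_score(labels_a, labels_b))
print("V-measure for 2 random labelings: %0.3f"
      % metrics.v_measure_score(labels_a, labels_b))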
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
mengyun1993/RNN-binary | rnn12-new.py | 1 | 30238 | """ Vanilla RNN
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import pickle as pickle
import math
import matplotlib.pyplot as plt
plt.ion()
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
""" Recurrent neural network class
Supported output types:
real : linear output units, use mean-squared error
binary : binary output units, use cross-entropy error
softmax : single softmax output, use cross-entropy error
"""
def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
output_type='real', use_symbolic_softmax=False):
self.input = input
self.activation = activation
self.output_type = output_type
# when using HF, SoftmaxGrad.grad is not implemented
# use a symbolic softmax which is slightly slower than T.nnet.softmax
# See: http://groups.google.com/group/theano-dev/browse_thread/
# thread/3930bd5a6a67d27a
if use_symbolic_softmax:
def symbolic_softmax(x):
e = T.exp(x)
return e / T.sum(e, axis=1).dimshuffle(0, 'x')
self.softmax = symbolic_softmax
else:
self.softmax = T.nnet.softmax
# recurrent weights as a shared variable
W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W = theano.shared(value=W_init, name='W')
# input to hidden layer weights
W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_in = theano.shared(value=W_in_init, name='W_in')
# hidden to output layer weights
W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_out = theano.shared(value=W_out_init, name='W_out')
h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.h0 = theano.shared(value=h0_init, name='h0')
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.bh = theano.shared(value=bh_init, name='bh')
by_init = np.zeros((n_out,), dtype=theano.config.floatX)
self.by = theano.shared(value=by_init, name='by')
self.params = [self.W, self.W_in, self.W_out, self.h0,
self.bh, self.by]
# for every parameter, we maintain its last update
# the idea here is to use "momentum"
# keep moving mostly in the same direction
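# concretely, as applied in fit():
#   update_t = mom * update_{t-1} - lr * d(cost)/d(param)
#   param    = param + update_t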
self.updates = {}
for param in self.params:
init = np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX)
self.updates[param] = theano.shared(init)
# recurrent function (using tanh activation function) and linear output
# activation function
def step(x_t, h_tm1):
h_t = self.activation(T.dot(x_t, self.W_in) + \
T.dot(h_tm1, self.W) + self.bh)
y_t = T.dot(h_t, self.W_out) + self.by
return h_t, y_t
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
[self.h, self.y_pred], _ = theano.scan(step,
sequences=self.input,
outputs_info=[self.h0, None])
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
self.L1 += abs(self.W_in.sum())
self.L1 += abs(self.W_out.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
self.L2_sqr += (self.W_in ** 2).sum()
self.L2_sqr += (self.W_out ** 2).sum()
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
# push through sigmoid
self.p_y_given_x = T.nnet.sigmoid(self.y_pred) # apply sigmoid
self.y_out = T.round(self.p_y_given_x) # round to {0,1}
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
self.p_y_given_x = self.softmax(self.y_pred)
# compute prediction as class whose probability is maximal
self.y_out = T.argmax(self.p_y_given_x, axis=-1)
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of time steps (call it T) in the sequence
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
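# worked example of the indexing above (a sketch): if p_y_given_x is
#   [[0.7, 0.2, 0.1],
#    [0.1, 0.8, 0.1]]
# and y = [0, 1], then T.arange(y.shape[0]) = [0, 1] and
# LP[[0, 1], [0, 1]] selects log(0.7) and log(0.8), so the returned loss
# is -(log(0.7) + log(0.8)) / 2.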
def errors(self, y):
"""Return a float representing the number of errors in the sequence
over the total number of examples in the sequence ; zero one
loss over the size of the sequence
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_out.ndim:
raise TypeError('y should have the same shape as self.y_out',
('y', y.type, 'y_out', self.y_out.type))
if self.output_type in ('binary', 'softmax'):
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_out, y))
else:
raise NotImplementedError()
class MetaRNN(BaseEstimator):
def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
n_epochs=100, L1_reg=0.00, L2_reg=0.00005, learning_rate_decay=1,
activation='tanh', output_type='real',
final_momentum=0.9, initial_momentum=0.5,
momentum_switchover=5,
use_symbolic_softmax=False):
self.n_in = int(n_in)
self.n_hidden = int(n_hidden)
self.n_out = int(n_out)
self.learning_rate = float(learning_rate)
self.learning_rate_decay = float(learning_rate_decay)
self.n_epochs = int(n_epochs)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.initial_momentum = float(initial_momentum)
self.final_momentum = float(final_momentum)
self.momentum_switchover = int(momentum_switchover)
self.use_symbolic_softmax = use_symbolic_softmax
self.ready()
def ready(self):
# input (where first dimension is time)
self.x = T.matrix()
# target (where first dimension is time)
if self.output_type == 'real':
self.y = T.matrix(name='y', dtype=theano.config.floatX)
elif self.output_type == 'binary':
self.y = T.matrix(name='y', dtype='int32')
elif self.output_type == 'softmax': # only vector labels supported
self.y = T.vector(name='y', dtype='int32')
else:
raise NotImplementedError
# initial hidden state of the RNN
self.h0 = T.vector()
# learning rate
self.lr = T.scalar()
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.rnn = RNN(input=self.x, n_in=self.n_in,
n_hidden=self.n_hidden, n_out=self.n_out,
activation=activation, output_type=self.output_type,
use_symbolic_softmax=self.use_symbolic_softmax)
if self.output_type == 'real':
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_pred,
mode=mode)
elif self.output_type == 'binary':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=T.round(self.rnn.p_y_given_x),
mode=mode)
elif self.output_type == 'softmax':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_out, mode=mode)
else:
raise NotImplementedError
def shared_dataset(self, data_xy):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX))
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX))
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
params = self._get_params() # parameters set in constructor
weights = [p.get_value() for p in self.rnn.params]
state = (params, weights)
return state
def _set_weights(self, weights):
""" Set fittable parameters from weights sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
i = iter(weights)
for param in self.rnn.params:
param.set_value(i.next())
def __setstate__(self, state):
""" Set parameters from state sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
params, weights = state
self.set_params(**params)
self.ready()
self._set_weights(weights)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s.pkl' % (class_name, date_str)
fabspath = os.path.join(fpath, fname)
logging.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logging.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
def fit(self, X_train, Y_train, X_test=None, Y_test=None,
validation_frequency=100):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (n_seq x n_steps x n_in)
Y_train : ndarray (n_seq x n_steps x n_out)
validation_frequency : int
in terms of number of sequences (or number of weight updates)
"""
f = file('../RNN-data/trainProcess/trainOutput-b15-2220-720-60.txt','a+')
if X_test is not None:
assert(Y_test is not None)
self.interactive = True
test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
else:
self.interactive = False
train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
n_train = train_set_x.get_value(borrow=True).shape[0]
if self.interactive:
n_test = test_set_x.get_value(borrow=True).shape[0]
######################
# BUILD ACTUAL MODEL #
######################
logging.info('... building the model')
index = T.lscalar('index') # index to a case
# learning rate (may change)
l_r = T.scalar('l_r', dtype=theano.config.floatX)
mom = T.scalar('mom', dtype=theano.config.floatX) # momentum
cost = self.rnn.loss(self.y) \
+ self.L1_reg * self.rnn.L1 \
+ self.L2_reg * self.rnn.L2_sqr
compute_train_error = theano.function(inputs=[index, ],
outputs=self.rnn.loss(self.y),
givens={
self.x: train_set_x[index],
self.y: train_set_y[index]},
mode=mode)
if self.interactive:
compute_test_error = theano.function(inputs=[index, ],
outputs=self.rnn.loss(self.y),
givens={
self.x: test_set_x[index],
self.y: test_set_y[index]},
mode=mode)
# compute the gradient of cost with respect to theta = (W, W_in, W_out)
# gradients on the weights using BPTT
gparams = []
for param in self.rnn.params:
gparam = T.grad(cost, param)
gparams.append(gparam)
updates = {}
for param, gparam in zip(self.rnn.params, gparams):
weight_update = self.rnn.updates[param]
upd = mom * weight_update - l_r * gparam
updates[weight_update] = upd
updates[param] = param + upd
# compiling a Theano function `train_model` that returns the
# cost, but in the same time updates the parameter of the
# model based on the rules defined in `updates`
train_model = theano.function(inputs=[index, l_r, mom],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[index],
self.y: train_set_y[index]},
mode=mode)
###############
# TRAIN MODEL #
###############
logging.info('... training')
epoch = 0
while (epoch < self.n_epochs):
epoch = epoch + 1
for idx in xrange(n_train):
effective_momentum = self.final_momentum \
if epoch > self.momentum_switchover \
else self.initial_momentum
example_cost = train_model(idx, self.learning_rate,
effective_momentum)
# iteration number (how many weight updates have we made?)
# epoch is 1-based, index is 0 based
iter = (epoch - 1) * n_train + idx + 1
if iter % validation_frequency == 0:
# compute loss on training set
train_losses = [compute_train_error(i)
for i in xrange(n_train)]
this_train_loss = np.mean(train_losses)
if self.interactive:
test_losses = [compute_test_error(i)
for i in xrange(n_test)]
this_test_loss = np.mean(test_losses)
f.write('epoch %i, seq %i/%i, tr loss %f '
'te loss %f lr: %f \n' % \
(epoch, idx + 1, n_train,
this_train_loss, this_test_loss, self.learning_rate))
print('epoch %i, seq %i/%i, tr loss %f '
'te loss %f lr: %f' % \
(epoch, idx + 1, n_train,
this_train_loss, this_test_loss, self.learning_rate))
else:
f.write('epoch %i, seq %i/%i, train loss %f '
'lr: %f \n' % \
(epoch, idx + 1, n_train, this_train_loss,
self.learning_rate))
print('epoch %i, seq %i/%i, train loss %f '
'lr: %f' % \
(epoch, idx + 1, n_train, this_train_loss,
self.learning_rate))
self.learning_rate *= self.learning_rate_decay
f.close()
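# Minimal usage sketch (not exercised below; illustrative hyperparameters and
# tiny random data). Note that fit() appends its progress to a file under
# ../RNN-data/trainProcess/, so that directory is assumed to exist, exactly as
# in test_binary.
def demo_binary_usage():
    n_seq, n_steps, n_in, n_out = 10, 5, 4, 3
    rng = np.random.RandomState(0)
    seq = rng.randn(n_seq, n_steps, n_in)
    targets = rng.randint(0, 2, size=(n_seq, n_steps, n_out))
    model = MetaRNN(n_in=n_in, n_hidden=8, n_out=n_out,
                    learning_rate=0.01, n_epochs=5,
                    activation='tanh', output_type='binary')
    model.fit(seq, targets, validation_frequency=1000)
    probs = model.predict_proba(seq[0])  # (n_steps, n_out) probabilities
    print(probs.shape)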
def test_real():
""" Test RNN with real-valued outputs. """
n_hidden = 200
n_in = 20
n_out = 5
n_steps = 10
n_seq = 100
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out))
targets[:, 1:, 0] = seq[:, :-1, 3] # delayed 1
targets[:, 1:, 1] = seq[:, :-1, 2] # delayed 1
targets[:, 2:, 2] = seq[:, :-2, 0] # delayed 2
targets += 0.01 * np.random.standard_normal(targets.shape)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=400, activation='tanh')
model.fit(seq, targets, validation_frequency=1000)
[seqNum,lineNum,colNum] = targets.shape
print(seqNum,lineNum,colNum)
error = [0 for i in range(colNum)]
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[0])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.plot(targets[0])
guess = model.predict(seq[0])
guessed_targets = plt.plot(guess, linestyle='--')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_title('solid: true output, dashed: model output')
dif = abs(guess - targets[0])
[linedif,coldif] = dif.shape
print(linedif,coldif)
errorsum = 0
for i in range (colNum):
sum = 0
for j in range (lineNum):
sum += dif[j][i] ** 2
error[i] = math.sqrt(sum/lineNum)
errorsum += error[i]
print(error[i])
print("average error = ", errorsum/colNum)
def test_binary(multiple_out=False, n_epochs=250):
""" Test RNN with binary outputs. """
n_hidden = 100
n_in = 5
n_out = 121
n_steps = 40
n_seq = 1500
np.random.seed(0)
# simple lag test
seqlist = []
count = 0
data = []
BASE_DIR = os.path.dirname(__file__)
file_path1 = os.path.join(BASE_DIR,"../RNN-data/traindata/inputdata-b12-50-40-30-y.txt")
for l in open(file_path1):
#for l in open("inputdata-b02-300-10.txt"):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
data.append(row)
if (count == n_steps):
count = 0
if len(data) >0:
seqlist.append(data)
data = []
seqarray = np.asarray(seqlist)
seq = seqarray[:,:,:n_in]
targets = seqarray[:,:,n_in:]
seqlistTest1 = []
count = 0
dataTest1 = []
file_path2 = os.path.join(BASE_DIR, '../RNN-data/testdata/inputdata-b12-20-40-25.txt')
#file_path2 = os.path.join(BASE_DIR, '../RNN-data/testdata/inputerror-b15-60-60-12-y.txt')
for l in open(file_path2):
#for l in open("inputdata-b02-100-10.txt"):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
dataTest1.append(row)
if (count == n_steps):
count = 0
if len(dataTest1) >0:
seqlistTest1.append(dataTest1)
dataTest1 = []
seqarrayTest1 = np.asarray(seqlistTest1)
seqTest1 = seqarrayTest1[:,:,:n_in]
targetsTest1 = seqarrayTest1[:,:,n_in:]
############## Add another Test ####################
seqlistTest2 = []
count = 0
dataTest2 = []
#file_path2 = os.path.join(BASE_DIR, '../RNN-data/testdata/inputdata-b15-60-60-12.txt')
file_path4 = os.path.join(BASE_DIR, '../RNN-data/testdata/inputerror-b12-20-40-25-y.txt')
for l in open(file_path4):
#for l in open("inputdata-b02-100-10.txt"):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
dataTest2.append(row)
if (count == n_steps):
count = 0
if len(dataTest2) >0:
seqlistTest2.append(dataTest2)
dataTest2 = []
seqarrayTest2 = np.asarray(seqlistTest2)
seqTest2 = seqarrayTest2[:,:,:n_in]
targetsTest2 = seqarrayTest2[:,:,n_in:]
########### End add another Test ##############
######## Calculate change Frequency for each FF ##############
seqlistError = []
count = 0
dataError = []
file_path3 = os.path.join(BASE_DIR, '../RNN-data/traindata/inputerror-b12-50-40-30-y.txt')
for l in open(file_path3):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
dataError.append(row)
if (count == n_steps):
count = 0
if len(dataError) >0:
seqlistError.append(dataError)
dataError = []
seqarrayError = np.asarray(seqlistError)
targetsError = seqarrayError[:,:,n_in:]
[seqNum, lineNum, colNum] = targetsTest1.shape
freqArray = [None] * lineNum
for i in range (lineNum):
freqArray[i] = [0]*colNum
freqArrayNP = np.asarray(freqArray)
for i in range(seqNum):
freqArrayNP = freqArrayNP +abs(targets[i] - targetsError[i])
fmatrix = file('../RNN-data/matrix/freqMatrix-b12.txt','a+')
for i in range (lineNum):
for j in range(colNum):
fmatrix.write(str(freqArrayNP[i,j]))
fmatrix.write("\n")
fmatrix.close()
######### End Frequency Calculation #########################
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.085, learning_rate_decay=1.005,
n_epochs=n_epochs, activation='tanh', output_type='binary')
#model.fit(seq, targets, validation_frequency=1000)
model.fit(seq, targets, seqTest1, targetsTest1, validation_frequency=1000)
ferror1 = file('errorRate/errorRate-b12-no.txt','a+')
ferror2 = file('errorRate/errorRate-b12-single.txt','a+')
[seqNum,lineNum,colNum] = targetsTest1.shape
seqs = xrange(seqNum)
error = [0 for i in range(seqNum)]
errorsum = 0
for k in seqs:
guess1 = model.predict_proba(seqTest1[k])
dif1 = abs(guess1 - targetsTest1[k])
[lineDif,colDif] = dif1.shape
for i in range (1,lineDif):
for j in range (colDif):
if (dif1[i][j] > 0.5):
ferror1.write("1 ")
else:
ferror1.write("0 ")
ferror1.write("\n")
ferror1.close()
for k in seqs:
guess2 = model.predict_proba(seqTest2[k])
dif2 = abs(guess2 - targetsTest2[k])
[lineDif,colDif] = dif2.shape
for i in range (1,lineDif):
for j in range (colDif):
if (dif2[i][j] > 0.5):
ferror2.write("1 ")
else:
ferror2.write("0 ")
ferror2.write("\n")
ferror2.close()
## #print (seqTest.shape)
## seqs = xrange(seqNum)
## error = [0 for i in range(lineNum*seqNum)]
## errorsum = 0
## for k in seqs:
## guess = model.predict_proba(seqTest[k])
## dif = abs(guess - targetsTest[k])
## [lineDif,colDif] = dif.shape
## #print(lineDif,colDif)
## for i in range (lineDif):
## ki = k*lineDif+i
## for j in range (colDif):
## if (dif[i][j] > 0.5):
## error[ki] += 1
## ferror.write('error %d = %d \n' % (ki,error[ki]))
## if (error[ki]>0):
## errorsum += 1
## print(errorsum)
## errorRate = errorsum/1.0/seqNum/lineNum
## ferror.write("average error = %f \n" % (errorRate))
##
## seqs = xrange(1)
##
## [seqNum,lineNum,colNum] = targets.shape
## print(seqNum,lineNum,colNum)
## error = [0 for i in range(colNum)]
##
## plt.close('all')
## for seq_num in seqs:
## fig = plt.figure()
## ax1 = plt.subplot(211)
## plt.plot(seq[seq_num])
## ax1.set_title('input')
## ax2 = plt.subplot(212)
## true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
##
## guess = model.predict_proba(seq[seq_num])
## guessed_targets = plt.step(xrange(n_steps), guess)
## plt.setp(guessed_targets, linestyle='--', marker='d')
## for i, x in enumerate(guessed_targets):
## x.set_color(true_targets[i].get_color())
## ax2.set_ylim((-0.1, 1.1))
## ax2.set_title('solid: true output, dashed6 model output (prob)')
##
##
## dif = abs(guess - targets[seq_num])
## [lineDif,colDif] = dif.shape
## print(lineDif,colDif)
## errorsum = 0
## for i in range (colNum):
## for j in range (lineNum):
## if (dif[j][i] > 0.5):
## error[i] += 1
## print(error[i])
## errorsum += error[i]
## print("average error = ", errorsum/colNum)
def test_softmax(n_epochs=250):
""" Test RNN with softmax outputs. """
n_hidden = 10
n_in = 5
n_steps = 10
n_seq = 100
n_classes = 3
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps), dtype=np.int)
thresh = 0.5
# if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
# class 1
# if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
# class 2
# if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
# class 0
targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
#targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=n_epochs, activation='tanh',
output_type='softmax', use_symbolic_softmax=False)
model.fit(seq, targets, validation_frequency=1000)
seqs = xrange(10)
[seqNum,lineNum,colNum] = seq.shape
print(seqNum,lineNum,colNum)
error = [0 for i in range(colNum)]
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[seq_num])
guessed_probs = plt.imshow(guess.T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
dif = abs(seq[seq_num] - targets[seq_num])
for i in range (colNum):
sum = 0
for j in range (lineNum):
sum += dif[i,j] ** 2
error[i] = math.sqrt(sum/lineNum)
print(error[i])
if __name__ == "__main__":
##logging.basicConfig(
## level = logging.INFO,
## format = 'LINE %(lineno)-4d %(levelname)-8s %(message)s',
## datafmt = '%m-%d %H:%M',
## filename = "D:/logresult20160123/one.log",
## filemode = 'w')
t0 = time.time()
#test_real()
# problem takes more epochs to solve
test_binary(multiple_out=True, n_epochs=20)
#test_softmax(n_epochs=250)
print ("Elapsed time: %f" % (time.time() - t0))
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/utils/__init__.py | 17 | 12898 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .deprecation import deprecated
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import ConvergenceWarning as _ConvergenceWarning
from ..exceptions import DataConversionWarning
@deprecated("ConvergenceWarning has been moved into the sklearn.exceptions "
"module. It will not be available here from version 0.19")
class ConvergenceWarning(_ConvergenceWarning):
pass
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
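# For example (an illustrative sketch): with mask = np.array([True, False,
# True]), a dense X passes the boolean mask through unchanged, while a
# scipy.sparse matrix gets integer indices instead, since sparse matrices may
# not support boolean indexing:
#     safe_mask(np.eye(3), mask) -> array([ True, False,  True])
#     safe_mask(scipy.sparse.csr_matrix(np.eye(3)), mask) -> array([0, 2])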
def axis0_safe_slice(X, mask, len_mask):
"""
This function is safer than safe_mask since it returns an
empty array when a sparse matrix is sliced with a boolean mask
that is all False, instead of raising an unhelpful error in older
versions of SciPy.
See: https://github.com/scipy/scipy/issues/5361
Also note that we can avoid doing the dot product by checking if
the len_mask is not zero in _huber_loss_and_gradient but this
is not going to be the bottleneck, since the number of outliers
and non_outliers are typically non-zero and it makes the code
tougher to follow.
"""
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
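# Sketch of the two branches above: with a non-empty mask this is just
# X[safe_mask(X, mask), :]; with an all-False mask on, say, a CSR matrix with
# 3 columns it returns np.zeros((0, 3)) rather than attempting an empty
# boolean slice on the sparse input.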
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
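# Illustrative behaviour (a sketch, not a doctest):
#     safe_indexing(np.arange(6).reshape(3, 2), np.array([0, 2])) takes rows
#         0 and 2 via X.take (indices are an integer ndarray),
#     safe_indexing([['a', 'b'], ['c', 'd']], [1]) -> [['c', 'd']] using plain
#         list indexing, and
#     pandas objects are indexed positionally through .iloc.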
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
If replace is False it should not be larger than the length of
arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
elif (max_n_samples > n_samples) and (not replace):
raise ValueError("Cannot sample %d out of arrays with dim %d "
"when replace is False" % (max_n_samples,
n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
| bsd-3-clause |
ctogle/modular | src/modular4/mplt.py | 1 | 27992 | #!/usr/bin/env python
import modular4.base as mb
import modular4.ensemble as me
import modular4.output as mo
import sim_anneal.pspace as spsp
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['pdf.fonttype'] = 42
import argparse,sys,os,time,numpy,cPickle
import pdb
# mplot is a representation of a "figure", which
# contains some number of subplots, and some arrangement
# of modular data objects with those plots
class mplot(mb.mobject):
# use load_data to associate modular data objects with the mplot
# specify general info about the final figure to help
# lay out the axes
# how many subplots?
# line and/or color and/or anything else?
# each ax has xax,[yax,[zax]],targets at minimum
# this corresponds to extracting arrays from data objects
# there's polish information for each subplot that's also needed:
# xax_title,yax_title,plot_title,color_bar_range
# colors of lines,legend labels per target
# line markers/styles/linewidths per target
#
# presumably the plot should be output as pdf/png as the regular
# plotting interface does
# requires filename/save directory
#
# paths is a list of full paths to data file from which modular
# data objects will be extracted
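# a rough usage sketch (method and kwarg names as defined below; the pkl
# path, target names and filenames are assumptions about the pickled
# modular4 output):
#   mp = mplot(aspect = (12,8))
#   sub = mp.subplot(111,xlab = 'time',ylab = 'count')
#   sub.add_line('time','species1')
#   mp.open_data('path/to/output.pkl')
#   mp.render('figure.pdf')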
# get a new axis to place in the figure
def subplot(self,*args,**kwargs):
sub = msubplot(self,*args,**kwargs)
self.subplots.append(sub)
return sub
# destroy any data dep. information
def clear(self):
#for da in self.datas:self.datas[da] = None
#self.datas['extras'] = lfu.data_container(data = [])
#self.datas = {'extras':lfu.data_container(data = [])}
self.datas = {'extras':{}}
def __init__(self,*args,**kwargs):
self._def('name','mplot',**kwargs)
self._def('subplots',[],**kwargs)
self._def('wspace',0.2,**kwargs)
self._def('hspace',0.2,**kwargs)
self._def('left',0.1,**kwargs)
self._def('right',0.98,**kwargs)
self._def('top',0.96,**kwargs)
self._def('bottom',0.08,**kwargs)
self._def('pipe',[],**kwargs)
self._def('aspect',(24,16),**kwargs)
self.figure = plt.figure(figsize = self.aspect)
self.figure.subplots_adjust(wspace = self.wspace,hspace = self.hspace)
#self.datas = {'extras':lfu.data_container(data = [])}
self.datas = {'extras':{}}
self.clear()
# access specified data files and return data objects
def open_data(self,*paths):
self.clear()
fpaths = []
for pa in paths:
if pa.endswith('.pkl'):
#fpaths.extend(glob.glob(pa))
fpaths.append(pa)
else:
pafis = os.listdir(pa)
pfs = [p for p in pafis if p.endswith('.pkl')]
fpaths.extend(pfs)
for pa in fpaths:
if not pa in self.datas:
self.datas[pa] = None
for fpa in self.datas:
if fpa == 'extras':continue
print '\nloading pkl data:',fpa
with open(fpa,'rb') as h:dat = cPickle.load(h)['pages']
self.datas[fpa] = dat
# produce the actual plot axes objects with plotted data
# axes are used from elsewhere (including plt.show())
def render(self,savefilename = None):
for subp in self.subplots:
subp.clear()
#subp.extract()
subp.render()
l,r,t,b = self.left,self.right,self.top,self.bottom
self.figure.subplots_adjust(left = l,right = r,top = t,bottom = b)
#self.figure.tight_layout()
if not savefilename is None:
print('saving figure...')
self.figure.savefig(savefilename,dpi = 100)
print('saved figure!!!')
# this represents a subplot in the figure, with one axis, and all
# other data associated with the plot
class msubplot(mb.mobject):
# destroy any plot/data dep. information
def clear(self):self.ax = None
def __init__(self,mplot,subloc,*args,**kwargs):
self.mplot = mplot
self.loc = subloc
self._def('reqorder',[],**kwargs)
self._def('requests',[],**kwargs)
self._def('name','msubplot',**kwargs)
self._def('ptype','lines',**kwargs)
self._def('plab',None,**kwargs)
self._def('xlab',None,**kwargs)
self._def('ylab',None,**kwargs)
self._def('zlab',None,**kwargs)
self._def('plabsize',18,**kwargs)
self._def('xlabsize',20,**kwargs)
self._def('ylabsize',20,**kwargs)
self._def('zlabsize',20,**kwargs)
self._def('xticksize',20,**kwargs)
self._def('yticksize',20,**kwargs)
self._def('xlog',False,**kwargs)
self._def('ylog',False,**kwargs)
self._def('zlog',False,**kwargs)
self._def('xmin',None,**kwargs)
self._def('xmax',None,**kwargs)
self._def('ymin',None,**kwargs)
self._def('ymax',None,**kwargs)
self._def('zmin',None,**kwargs)
self._def('zmax',None,**kwargs)
self._def('legend',True,**kwargs)
self._def('legendloc',1,**kwargs)
# given x and y (strings), add a request for this target
# requests are considered based on input data in self.extract
def add_line(self,x,y,**kwargs):
if type(x) is type(''):print 'expecting domain',x,'in input files'
#elif issubclass(x.__class__,mst.scalars):
# self.mplot.datas['extras'].data.append(x)
else:
xnam = 'extrax-'+str(len(self.mplot.datas['extras']))
xdat = numpy.array(x)
self.mplot.datas['extras'][xnam] = xdat
x = xnam
if type(y) is type(''):print 'expecting codomain',y,'in input files'
#elif issubclass(y.__class__,mst.scalars):
# self.mplot.datas['extras'].data.append(y)
else:
ynam = 'extray-'+str(len(self.mplot.datas['extras']))
ydat = numpy.array(y)
self.mplot.datas['extras'][ynam] = ydat
y = ynam
self.requests.append(kwargs)
self.reqorder.append((x,y))
self.ptype = 'lines'
# given x,y, and z (strings), add a request for this target
# requests are considered based on input data in self.extract
def add_heat(self,x,y,z,zm = None,zmf = None,**kwargs):
if type(x) is type(''):print 'expecting xdomain',x,'in input files'
else:
xnam = 'extrax-'+str(len(self.mplot.datas['extras']))
xdat = numpy.array(x)
self.mplot.datas['extras'][xnam] = xdat
x = xnam
if type(y) is type(''):print 'expecting ydomain',y,'in input files'
else:
ynam = 'extray-'+str(len(self.mplot.datas['extras']))
ydat = numpy.array(y)
self.mplot.datas['extras'][ynam] = ydat
y = ynam
if type(z) is type(''):print 'expecting surface',z,'in input files'
else:
znam = 'extraz-'+str(len(self.mplot.datas['extras']))
zdat = numpy.array(z)
self.mplot.datas['extras'][znam] = zdat
z = znam
if not zm is None:
if type(zm) is type(()):
for szm in zm:
if type(szm) is type(''):
print 'expecting sub-mask surface',szm,'in input files'
else:raise ValueError
elif type(zm) is type(''):
print 'expecting mask surface',zm,'in input files'
else:
zmnam = 'extraz-'+str(len(self.mplot.datas['extras']))
zmdat = numpy.array(zm)
self.mplot.datas['extras'][zmnam] = zmdat
zm = zmnam
self.requests.append(kwargs)
self.reqorder.append((x,y,z,zm))
self.ptype = 'color'
self.zmf = zmf
# given the data objects of the parent mplot
# create and store a set of mplottarget objects
def extract(self):
datas = self.mplot.datas
if self.ptype == 'lines':ptargets = self.lines(datas)
elif self.ptype == 'color':ptargets = self.color(datas)
else:raise ValueError
self.plottargets = ptargets
self.targetcount = len(ptargets)
# generate mplottarget line objects for this subplot
def lines(self,datas):
exs = self.mplot.datas['extras']
lines = []
for reqx in range(len(self.reqorder)):
rkws = self.requests[reqx]
req = self.reqorder[reqx]
xdom,ycod = req
print '\nextracting request:',xdom,ycod,'\n','-'*40
if xdom in exs:x = exs[xdom]
else:x = self.locate(datas,xdom)
if ycod in exs:y = exs[ycod]
else:y = self.locate(datas,ycod)
if x is None:print 'failed to locate x for request:',req
elif y is None:print 'failed to locate y for request:',req
else:
line = mplotline(self,x,y,req,**rkws)
lines.append(line)
print 'made line:',line
return lines
# generate color mesh objects for this subplot
def color(self,datas):
heats = []
for reqx in range(len(self.reqorder)):
rkws = self.requests[reqx]
req = self.reqorder[reqx]
xdom,ydom,zcod,zmask = req
print '\nextracting request:',req,'\n','-'*40
if zmask is None:zm = None
elif type(zmask) == type(()):
zm = tuple((self.locate(datas,szm) for szm in zmask))
else:zm = self.locate(datas,zmask)
z = self.locate(datas,zcod)
x = self.locate(datas,xdom)
y = self.locate(datas,ydom)
#zmask = 'T_total1:mean_event_count'
#zm = self.locate(datas,zmask)
#if issubclass(z.__class__,mst.reducer):
# x = z
# y = z
#else:
# x = self.locate(datas,xdom)
# y = self.locate(datas,ydom)
if x is None:print 'failed to locate x for request:',req
elif y is None:print 'failed to locate y for request:',req
elif z is None:print 'failed to locate z for request:',req
else:
heat = mplotheat(self,x,y,z,zm,req,**rkws)
heats.append(heat)
print 'made heat map:',heat
return heats
    # given the data objects loaded by the parent mplot, return the single
    # object whose name matches the target, prompting when the match is ambiguous
def locate(self,datas,target):
located = []
dfs = sorted(datas.keys())
for df in dfs:
if df == 'extras':continue
data = datas[df]
for pg in data:
if not pg:continue
ds,ts,ps = pg
if target in ts:
located.append((pg,df))
elif target in pg[2].keys():
located.append((pg[2],df))
lcnt = len(located)
if lcnt == 0:
print '\n','-'*40,'\ntarget:',target
print '\tcould not be located!!\n','-'*40,'\n'
return
elif lcnt == 1:return located[0][0]
#elif self.mplot.pipe:return located[self.mplot.pipe.pop(0)][0]
elif self.mplot.pipe:
if type(self.mplot.pipe) == type([]):
return located[self.mplot.pipe.pop(0)][0]
else:return located[next(self.mplot.pipe)][0]
else:
print 'more than one data object was found for:',target
for lx in range(lcnt):
lc,lf = located[lx]
#print '\n',lx,'\tfrom file:',lf,'\n\t\tof class:',lc
print '\n',lx,'\tfrom file:',lf
which = int(raw_input('\n\tlocated index please:\n\t\t'))
return located[which][0]
'''#
elif lcnt == 2 and located[0][1] == located[1][1]:
if issubclass(located[0][0].__class__,mst.reducer):
return located[0][0]
else:return located[1][0]
else:
print 'more than one data object was found for:',target
for lx in range(lcnt):
lf,lc = located[lx]
print '\n',lx,'\tfrom file:',lf,'\n\t\tof class:',lc
which = int(raw_input('\n\tlocated index please:\n\t\t'))
return located[which][0]
'''#
# determine the min/max of x/y based on current plottargets
def minmaxes(self):
tcnt = self.targetcount
if tcnt == 0:mms = (0,1,0,1,0,1)
elif tcnt == 1:mms = self.plottargets[0].minmax()
else:
mms = self.plottargets[0].minmax()
for tx in range(1,tcnt):
omms = self.plottargets[tx].minmax()
mms = (
min(mms[0],omms[0]),max(mms[1],omms[1]),
min(mms[2],omms[2]),max(mms[3],omms[3]),
min(mms[4],omms[4]),max(mms[5],omms[5]))
mms = (
mms[0] if self.xmin is None else self.xmin,
mms[1] if self.xmax is None else self.xmax,
mms[2] if self.ymin is None else self.ymin,
mms[3] if self.ymax is None else self.ymax,
mms[4] if self.zmin is None else self.zmin,
mms[5] if self.zmax is None else self.zmax)
return mms
# reset the axis and replot all mplottarget objects
def render(self):
self.clear()
self.extract()
self.ax = self.mplot.figure.add_subplot(self.loc)
for ptarg in self.plottargets:ptarg.render(self.ax)
xb,xt,yb,yt,zb,zt = self.minmaxes()
self.ax.set_xlim([xb,xt])
self.ax.set_ylim([yb,yt])
#self.ax.set_zlim([zb,zt])
#if self.ptype == 'lines' and self.legend:
if self.ptype == 'lines':
if self.legend:
leg = self.ax.legend(loc = self.legendloc)
#leg.draggable()
if self.xlog:self.ax.set_xscale('log')
if self.ylog:self.ax.set_yscale('log')
if self.xlab:self.ax.set_xlabel(self.xlab,fontsize = self.xlabsize)
if self.ylab:self.ax.set_ylabel(self.ylab,fontsize = self.ylabsize)
if self.zlab:
if hasattr(self.ax,'set_zlabel'):
self.ax.set_zlabel(self.zlab,fontsize = self.zlabsize)
if self.plab:self.ax.set_title(self.plab,fontsize = self.plabsize)
for tick in self.ax.xaxis.get_major_ticks():
tick.label.set_fontsize(self.xticksize)
for tick in self.ax.yaxis.get_major_ticks():
tick.label.set_fontsize(self.yticksize)
# represents one plot target in a plot
class mplottarg(mb.mobject):
# describe this target in a string
def __str__(self):
minx,maxx,miny,maxy,minz,maxz = self.minmax()
s = ''.join((' "',str(self.name),'" - ',str(self.__class__)))
s += ''.join(('\n\tresulting from request: "',str(self.req),'"'))
s += ''.join(('\n\tx-data:\n\t\ton range:',str(minx),' - ',str(maxx)))
s += ''.join(('\n\ty-data:\n\t\ton range:',str(miny),' - ',str(maxy)))
s += ''.join(('\n\tz-data:\n\t\ton range:',str(minz),' - ',str(maxz)))
return s
def __init__(self,msubplot,req,*args,**kwargs):
self.msub = msubplot
self._def('name',req[-1],**kwargs)
self._def('cmap',None,**kwargs)
self._def('rast',False,**kwargs)
self._def('width',2.0,**kwargs)
# represents one colormesh in a plot
class mplotheat(mplottarg):
# given a data object for x,y, and z, store numpy arrays to plot
def inp(self,x,y,z,zm,req):
if type(x) != type(()):
self.x = x
self.y = y
self.z = z
self.zm = zm
else:
aux = x[2]
if 'pspaceaxes' in aux and req[0] in aux['pspaceaxes']:
axisnames = aux['pspaceaxes']
axxs = tuple(z[1].index(a) for a in axisnames)
axvs = [z[0][a] for a in axxs]
axds = [None for a in axisnames]
axcnt = len(axisnames)
if axcnt > 1:
print '\nneed to set axis defaults on a reducer...'
for axx in range(axcnt):
nv,ax = '',axisnames[axx]
axval = numpy.unique(z[0][z[1].index(ax)])
if ax == req[0] or ax == req[1]:continue
if len(axval) > 1:
if ax in self.name:
nmsp = tuple(x.strip() for x in self.name.split('='))
nv = nmsp[nmsp.index(ax)+1]
try:nv = float(nv)
except:nv = ''
if nv == '':
axdef = numpy.unique(axvs[axx])
print 'reducer axis default:','"'+ax+'"',':',axdef
print '\twith potential values:',axval
#if self.msub.mplot.pipe:nv = self.msub.mplot.pipe.pop(0)
if self.msub.mplot.pipe:
if type(self.msub.mplot.pipe) == type([]):
nv = self.msub.mplot.pipe.pop(0)
else:nv = next(self.msub.mplot.pipe)
else:nv = raw_input('\n\tnew value?:\t')
if nv == '':nv = axval[0]
try:
nv = float(nv)
nv = axval[spsp.locate(axval,nv)]
axds[axx] = nv
except:print 'axis default input ignored:',nv
axss,inss = axds[:],[]
axss[axisnames.index(req[0])] = None
axss[axisnames.index(req[1])] = None
for axx in range(len(axisnames)):
if axss[axx] is None:inss.append([1 for v in axvs[axx]])
else:inss.append([1 if v == axds[axx] else 0 for v in axvs[axx]])
in_every = [(0 not in row) for row in zip(*inss)]
surf = [sur for sur,ie in zip(z[0][z[1].index(req[2])],in_every) if ie]
if zm is None:
msurf = numpy.ones(len(surf))
elif type(zm) == type(()):
msurf = [[sur for sur,ie in
zip(szm[0][szm[1].index(rq)],in_every) if ie]
for szm,rq in zip(zm,req[3])]
else:
msurf = [sur for sur,ie in zip(zm[0][zm[1].index(req[3])],in_every) if ie]
xzip = zip(x[0][x[1].index(req[0])],in_every)
yzip = zip(y[0][y[1].index(req[1])],in_every)
dx = numpy.unique(numpy.array([j for j,ie in xzip if ie]))
dy = numpy.unique(numpy.array([j for j,ie in yzip if ie]))
ds = numpy.array(surf,dtype = numpy.float)
if type(msurf) == type([]):
dsm = [numpy.array(smsurf,dtype = numpy.float) for smsurf in msurf]
else:dsm = [numpy.array(msurf,dtype = numpy.float)]
#ddx,ddy = dx[1]-dx[0],dy[1]-dy[0]
#dx = numpy.linspace(dx[0]-ddx/2.0,dx[-1]+ddx/2.0,dx.size+1)
#dy = numpy.linspace(dy[0]-ddy/2.0,dy[-1]+ddy/2.0,dy.size+1)
#dx = numpy.linspace(dx[0],dx[-1]+ddx,dx.size+1)
#dy = numpy.linspace(dy[0],dy[-1]+ddy,dy.size+1)
if self.msub.xlog:
dx = numpy.exp(numpy.linspace(
numpy.log(dx[0]),numpy.log(dx[-1]),dx.size+1))
else:dx = numpy.linspace(dx[0],dx[-1],dx.size+1)
if self.msub.ylog:
dy = numpy.exp(numpy.linspace(
numpy.log(dy[0]),numpy.log(dy[-1]),dy.size+1))
else:dy = numpy.linspace(dy[0],dy[-1],dy.size+1)
ds = ds.reshape(dx.size-1,dy.size-1)
dsm = [sdsm.reshape(dx.size-1,dy.size-1) for sdsm in dsm]
if axisnames.index(req[0]) < axisnames.index(req[1]):
ds = ds.transpose()
dsm = [sdsm.transpose() for sdsm in dsm]
self.x = dx
self.y = dy
self.z = ds
self.zm = dsm
elif req[0] in x[1]:
print 'simple!'
            raise NotImplementedError
else:raise ValueError
self.req = req
def __init__(self,msubplot,x,y,z,zm,req,*args,**kwargs):
mplottarg.__init__(self,msubplot,req,*args,**kwargs)
self.inp(x,y,z,zm,req)
# get the min/max of the x/y data of this plot target
def minmax(self):
minx,maxx = self.x.min(),self.x.max()
miny,maxy = self.y.min(),self.y.max()
minz,maxz = self.z.min(),self.z.max()
return minx,maxx,miny,maxy,minz,maxz
# add a matplotlib color object to an axis for this target
def render(self,ax):
minx,maxx,miny,maxy,minz,maxz = self.msub.minmaxes()
#zmask = (self.z >= minz) <= maxz
#zmask = (self.zm >= 100)
if self.msub.zmf is None:zmask = self.zm == 0
else:zmask = self.msub.zmf(*self.zm)
#else:zmask = self.msub.zmf(self.zm)
if self.cmap is None:cmap = plt.cm.jet
else:cmap = self.cmap
cmap.set_over('m',1.0)
cmap.set_bad('k',1.0)
cmap.set_under('c',1.0)
pckws = {
'vmin':minz,'vmax':maxz,'cmap':cmap,
'linewidth':self.width,'rasterized':self.rast,
}
z = numpy.ma.masked_array(self.z,mask = zmask)
cmesh = ax.pcolormesh(self.x,self.y,z,**pckws)
#cmesh = ax.pcolor(self.x,self.y,z,**pckws)
cmesh.set_edgecolor('face')
xticks = numpy.linspace(self.x[0],self.x[-1],6)
yticks = numpy.linspace(self.y[0],self.y[-1],6)
        ax.set_xticks(xticks)
        ax.set_xticklabels(['%.2f' % v for v in xticks])
        ax.set_yticks(yticks)
        ax.set_yticklabels(['%.2f' % v for v in yticks])
if not minz == maxz:
cb = self.msub.mplot.figure.colorbar(
cmesh,extend = 'both',shrink = 0.9)
cb.set_label('',fontsize = 20)
for tick in cb.ax.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
if False:
curves = 10
levels = numpy.arange(self.z.min(),self.z.max(),
(1/float(curves))*(self.z.max()-self.z.min()))
#ax.contour(surf,colors = 'white',levels = levels)
contour = ax.contour(self.x,self.y,self.z,
colors = 'white',levels = levels)
ax.clabel(contour,inline=1,fontsize = 10)
# represents one line in a plot
class mplotline(mplottarg):
# given a data object for x and y, store numpy arrays to plot
def inp(self,x,y,req):
if type(x) != type(()):
self.x = x
self.y = y
else:
aux = x[2]
if 'pspaceaxes' in aux and req[0] in aux['pspaceaxes']:
axisnames = aux['pspaceaxes']
axxs = tuple(y[1].index(a) for a in axisnames)
axvs = [y[0][a] for a in axxs]
axds = [None for a in axisnames]
axcnt = len(axisnames)
if axcnt > 1:
print '\nneed to set axis defaults on a reducer...'
for axx in range(axcnt):
nv,ax = '',axisnames[axx]
axval = numpy.unique(y[0][y[1].index(ax)])
if ax == req[0]:continue
if len(axval) > 1:
if ax in self.name:
nmsp = tuple(x.strip() for x in self.name.split('='))
nv = nmsp[nmsp.index(ax)+1]
try:nv = float(nv)
except:nv = ''
if nv == '':
axdef = numpy.unique(axvs[axx])
print 'reducer axis default:','"'+ax+'"',':',axdef
print '\twith potential values:',axval
#if self.msub.mplot.pipe:nv = self.msub.mplot.pipe.pop(0)
if self.msub.mplot.pipe:
if type(self.msub.mplot.pipe) == type([]):
nv = self.msub.mplot.pipe.pop(0)
else:nv = next(self.msub.mplot.pipe)
else:nv = raw_input('\n\tnew value?:\t')
if nv == '':nv = axval[0]
try:
nv = float(nv)
nv = axval[spsp.locate(axval,nv)]
axds[axx] = nv
except:print 'axis default input ignored:',nv
axss,inss = axds[:],[]
axss[axisnames.index(req[0])] = None
for axx in range(len(axisnames)):
if axss[axx] is None:inss.append([1 for v in axvs[axx]])
else:inss.append([1 if v == axds[axx] else 0 for v in axvs[axx]])
in_every = [(0 not in row) for row in zip(*inss)]
xzip = zip(x[0][x[1].index(req[0])],in_every)
yzip = zip(y[0][y[1].index(req[1])],in_every)
dx = numpy.array([j for j,ie in xzip if ie])
dy = numpy.array([j for j,ie in yzip if ie])
self.x = dx
self.y = dy
elif req[0] in x[1]:
if len(x[0].shape) == 2:
self.x = x[0][x[1].index(req[0])]
elif len(x[0].shape) == 3:
print('extradimhack!',x[0].shape)
self.x = x[0][0][x[1].index(req[0])]
else:raise ValueError
if len(y[0].shape) == 2:
self.y = y[0][y[1].index(req[1])]
elif len(y[0].shape) == 3:
print('extradimhack!',y[0].shape)
self.y = y[0][0][y[1].index(req[1])]
else:raise ValueError
#try:self.y = y[0][y[1].index(req[1])]
#except:pdb.set_trace()
else:
pdb.set_trace()
raise ValueError
if not len(self.x) == len(self.y):
print 'unequal data lengths for request:',req
raise ValueError
self.req = req
def __init__(self,msubplot,x,y,req,*args,**kwargs):
mplottarg.__init__(self,msubplot,req,*args,**kwargs)
self._def('color','black',**kwargs)
self._def('style','-',**kwargs)
self._def('mark','',**kwargs)
self.inp(x,y,req)
# get the min/max of the x/y data of this plot target
def minmax(self):
if type(self.x) == type({}):
minx,maxx = 0,1
miny,maxy = 0,1
minz,maxz = 0,1
else:
minx,maxx = self.x.min(),self.x.max()
miny,maxy = self.y.min(),self.y.max()
minz,maxz = 0,1
return minx,maxx,miny,maxy,minz,maxz
# add a matplotlib line object to an axis for this target
def render(self,ax):
largs = {
'color':self.color,'linestyle':self.style,
'linewidth':self.width,'marker':self.mark}
if type(self.x) == type({}):
for le in self.x['extra_trajectory']:
ln,gs = le
line = matplotlib.lines.Line2D(*ln,**gs)
#if self.name:line.set_label(self.name)
ax.add_line(line)
else:
line = matplotlib.lines.Line2D(self.x,self.y,**largs)
if self.name:line.set_label(self.name)
ax.add_line(line)
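# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative only).  The real parent "mplot"
# object is defined elsewhere in this module; the stub below is a
# hypothetical stand-in providing only the attributes msubplot touches
# (datas['extras'], figure, pipe), so treat this as a sketch of the
# intended workflow rather than a definitive example of the API.
if __name__ == '__main__':
    class _stubplot(object):
        def __init__(self):
            self.datas = {'extras': {}}
            self.figure = plt.figure()
            self.pipe = []
    _sub = msubplot(_stubplot(), 111, xlab='x', ylab='sin(x)')
    _xs = numpy.linspace(0.0, 2.0*numpy.pi, 100)
    _sub.add_line(_xs, numpy.sin(_xs), name='sine')
    _sub.render()
    plt.show()
# ----------------------------------------------------------------------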
| mit |
liberatorqjw/scikit-learn | sklearn/decomposition/pca.py | 14 | 22688 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
    n_features: int,
        embedding/empirical dimension (number of features)
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
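# Illustrative sketch (not part of the public API): ``_infer_dimension_``
# returns the rank that maximises Minka's marginal likelihood for a given
# eigen-spectrum.  With a spectrum dominated by a few large eigenvalues,
#
#     spectrum = np.array([10., 8., 0.1, 0.08, 0.05])
#     k = _infer_dimension_(spectrum, n_samples=1000, n_features=5)
#
# ``k`` is expected to land on the number of dominant components (here 2),
# though the exact value depends on n_samples and on the noise floor.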
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and to score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
    Due to implementation subtleties of the Singular Value Decomposition (SVD),
    which this class relies on, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
"""
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
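# Usage sketch for the n_components options documented above (illustrative,
# not a doctest; ``X`` is assumed to be a dense 2-D array):
#
#     pca = PCA(n_components='mle').fit(X)   # Minka's MLE selects the rank
#     pca = PCA(n_components=0.95).fit(X)    # keep ~95% of explained variance
#     pca.n_components_                      # rank actually retained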
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
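# Sketch of when RandomizedPCA pays off versus the exact PCA above
# (illustrative): for a large dense matrix where only a few components are
# needed, the randomized solver avoids the full SVD, e.g.
#
#     X = np.random.RandomState(0).randn(5000, 400)
#     RandomizedPCA(n_components=10, random_state=0).fit(X)
#
# It approximates the leading singular vectors; the approximation is best
# when the spectrum of X decays quickly.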
| bsd-3-clause |
DawnEve/bio_scripts | plot/plot_distribution.py | 2 | 1953 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import re
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='Plot distribution',
epilog="https://github.com/shenwei356/bio_scripts")
parser.add_argument('-i', '--infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin, help='Input file')
parser.add_argument('-o', '--outfile', nargs='?', type=str,
default='dist.png', help='Output file')
parser.add_argument('--width', type=int, default=8, help='Figure width')
parser.add_argument('--height', type=int, default=6, help='Figure height')
parser.add_argument('--x_lim', type=str, help='x_lim. format: "1,100"')
parser.add_argument('--y_lim', type=str, help='y_lim. format: "1,100"')
parser.add_argument(
'-t', '--title', type=str, default='Distribution Plot', help='Figure Title')
parser.add_argument(
'-x', '--xlabel', type=str, default='Value', help='Figure X label')
parser.add_argument(
'-y', '--ylabel', type=str, default='Frequency', help='Figure Y label')
args = parser.parse_args()
if args.y_lim and not re.match(r'^\d+,\d+$', args.y_lim):
print("Invalid option value for --y_lim. Example: --y_lim 1,100 ", file=sys.stderr)
sys.exit(1)
if args.x_lim and not re.match(r'^\d+,\d+$', args.x_lim):
    print("Invalid option value for --x_lim. Example: --x_lim 1,100 ", file=sys.stderr)
sys.exit(1)
data = []
for line in args.infile:
data.append(float(line.strip()))
mpl.rc("figure", figsize=(args.width, args.height))
figure = sns.distplot(data)
figure.set_title(args.title)
figure.set_xlabel(args.xlabel)
figure.set_ylabel(args.ylabel)
if args.x_lim:
figure.set_xlim([int(x) for x in args.x_lim.split(',')])
if args.y_lim:
figure.set_ylim([int(y) for y in args.y_lim.split(',')])
plt.savefig(args.outfile)
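# Example invocation (illustrative; the file names are hypothetical):
#
#     python plot_distribution.py -i lengths.txt -o lengths.png \
#         -t "Length distribution" -x "Length (bp)" --x_lim 0,500
#
# The input is one numeric value per line; it may also be piped via stdin,
# since -i defaults to it.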
| mit |
planet-os/notebooks | api-examples/API_client/python/lib/variables.py | 1 | 4340 | import datetime
from pick import pick
from API_client.python.lib.predef_locations import locations
import dateutil.parser
class variable:
def __init__(self, varname, input_variables, scope, ds, debug=False):
self.varname = varname
self.ds = ds
var_attrs = [i for i in input_variables if i['variableKey'] == varname]
var_attrs = var_attrs[0]
if 'temporalCoverage' in var_attrs:
for at in ['start','end']:
setattr(self, at, datetime.datetime.utcfromtimestamp(var_attrs['temporalCoverage'][at]/1e3))
for at in ['units','long_name']:
if at in var_attrs:
setattr(self, at, var_attrs[at])
for sc in scope:
setattr(self, sc, scope[sc])
self.values = {}
self.debug = debug
## This does not work in notebook mode...
def get_values_interactive(self):
selected_reftime = pick(self.reftimes, "select reftime: ", multi_select=True, min_selection_count=1)
selected_start_time = pick(self.timesteps, "select start", min_selection_count=0)
selected_end_time = pick(self.timesteps, "select end", min_selection_count=0)
locations.update({'other':'lon,lat'})
selected_coords = pick(list(locations.keys()), 'select location or insert custom', min_selection_count=1)
selected_count = pick([1,10,100,1000,10000], 'count', min_selection_count=1)
# print("selected coordinates", selected_coords)
if selected_coords[0][0] == 'other':
            selected_lon = float(input("insert lon"))
            selected_lat = float(input("insert lat"))
assert selected_lon >= -180
assert selected_lon <= 360
assert selected_lat <= 90
assert selected_lat >= -90
else:
selected_lon = locations[selected_coords[0]][0]
selected_lat = locations[selected_coords[0]][1]
print("Is this correct:")
print("reftime:", selected_reftime)
print("start:", selected_start_time)
print("end:", selected_end_time)
assert input("y/n: ") == "y"
start_time = selected_start_time[0]
self.values['testloc'] = self.ds.get_json_data_in_pandas(debug=self.debug, **{'reftime_start':selected_reftime[0][0].isoformat(),
'reftime_end':selected_reftime[-1][0].isoformat(),
'vars':self.varname,
'lon':selected_lon,'lat':selected_lat,
'count':selected_count[0]})
def get_values(self, location="Võru", reftime=None, reftime_end=None, count=10, debug=False):
kwags = {}
if not reftime:
if self.reftimes:
kwags['reftime_start'] = selected_reftime = self.reftimes[-1]
else:
kwags['reftime_start'] = reftime
if reftime_end:
kwags['reftime_end'] = reftime_end
kwags['lon'] = locations[location][0]
kwags['lat'] = locations[location][1]
kwags['count'] = count
kwags['vars'] = self.varname
self.values[location] = self.ds.get_json_data_in_pandas(debug=debug, **kwags)
## **{'reftime_start':selected_reftime.isoformat(),
## 'reftime_end':reftime_end.isoformat(),
## 'vars':self.varname,
## 'lon':selected_lon,'lat':selected_lat,
## 'count':selected_count})
def get_values_analysis(self, location="Võru", reftime=None, reftime_end=None, count=10, debug=False):
kwags = {}
kwags['lon'] = locations[location][0]
kwags['lat'] = locations[location][1]
kwags['count'] = count
kwags['vars'] = self.varname
self.values[location] = self.ds.get_json_data_in_pandas(debug=debug, **kwags)
class variables:
def __init__(self, input_variables, scope, ds, debug=False):
for i in input_variables:
varname = i['variableKey']
setattr(self, varname.replace('-',''), variable(varname, input_variables, scope, ds))
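# Usage sketch (illustrative): ``variables`` is meant to be built by the
# dataset wrapper of this client, which supplies the variable metadata
# returned by the API together with a ``ds`` object exposing
# ``get_json_data_in_pandas``.  The surrounding client object and the
# variable name below are assumptions, not shown in this module; a typical
# call chain would then be
#
#     var = ds.variables.air_temperature          # one attribute per variableKey
#     var.get_values(location='Võru', count=24)   # predefined location
#     var.values['Võru']                          # result table for that location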
| mit |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
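# For reference (matches the fixtures exercised below): a svmlight/libsvm
# line is "<target> [qid:<int>] <index>:<value> ...", e.g.
#
#     3 qid:1 1:0.53 2:0.12
#     2 qid:1 1:0.13 2:0.1
#
# Multilabel files replace the single target with a comma-separated (possibly
# empty) list of labels, as exercised in test_load_svmlight_file_multilabel.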
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| mit |
oemof/examples | oemof_examples/oemof.solph/v0.2.x/generic_chp/bpt.py | 2 | 2981 | # -*- coding: utf-8 -*-
"""
General description
-------------------
Example that illustrates how the custom component `GenericCHP` can be used.
In this case it is used to model a back pressure turbine.
Installation requirements
-------------------------
This example requires the latest version of oemof. Install by:
pip install oemof
"""
import os
import pandas as pd
import oemof.solph as solph
from oemof.network import Node
from oemof.outputlib import processing, views
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
# read sequence data
full_filename = os.path.join(os.path.dirname(__file__), 'generic_chp.csv')
data = pd.read_csv(full_filename, sep=",")
# select periods
periods = len(data)-1
# create an energy system
idx = pd.date_range('1/1/2017', periods=periods, freq='H')
es = solph.EnergySystem(timeindex=idx)
Node.registry = es
# resources
bgas = solph.Bus(label='bgas')
rgas = solph.Source(label='rgas', outputs={bgas: solph.Flow()})
# heat
bth = solph.Bus(label='bth')
# dummy source at high costs that serves the residual load
source_th = solph.Source(label='source_th',
outputs={bth: solph.Flow(variable_costs=1000)})
demand_th = solph.Sink(label='demand_th', inputs={bth: solph.Flow(fixed=True,
actual_value=data['demand_th'], nominal_value=200)})
# power
bel = solph.Bus(label='bel')
demand_el = solph.Sink(label='demand_el', inputs={bel: solph.Flow(
variable_costs=data['price_el'])})
# back pressure turbine with the same GenericCHP parameters as the ccet example
# (for back pressure characteristics Q_CW_min=0 and back_pressure=True)
bpt = solph.components.GenericCHP(
label='back_pressure_turbine',
fuel_input={bgas: solph.Flow(
H_L_FG_share_max=[0.19 for p in range(0, periods)])},
electrical_output={bel: solph.Flow(
P_max_woDH=[200 for p in range(0, periods)],
P_min_woDH=[80 for p in range(0, periods)],
Eta_el_max_woDH=[0.53 for p in range(0, periods)],
Eta_el_min_woDH=[0.43 for p in range(0, periods)])},
heat_output={bth: solph.Flow(
Q_CW_min=[0 for p in range(0, periods)])},
Beta=[0. for p in range(0, periods)],
back_pressure=True)
# create an optimization problem and solve it
om = solph.Model(es)
# debugging
# om.write('generic_chp.lp', io_options={'symbolic_solver_labels': True})
# solve model
om.solve(solver='cbc', solve_kwargs={'tee': True})
# create result object
results = processing.results(om)
# plot data
if plt is not None:
# plot PQ diagram from component results
data = results[(bpt, None)]['sequences']
ax = data.plot(kind='scatter', x='Q', y='P', grid=True)
ax.set_xlabel('Q (MW)')
ax.set_ylabel('P (MW)')
plt.show()
# plot thermal bus
data = views.node(results, 'bth')['sequences']
ax = data.plot(kind='line', drawstyle='steps-post', grid=True)
ax.set_xlabel('Time (h)')
ax.set_ylabel('Q (MW)')
plt.show()
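# Further inspection (sketch; same results API as used above): the electrical
# bus can be examined in the same way as the thermal bus, e.g.
#
#     data_el = views.node(results, 'bel')['sequences']
#     print(data_el.sum())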
| gpl-3.0 |
phobson/pycvc | pycvc/tests/info_tests.py | 2 | 1388 | import nose.tools as nt
import pandas.util.testing as pdtest
import pandas
from pycvc import info
def test_constants():
nt.assert_equal(info.LITERS_PER_CUBICMETER, 1000)
nt.assert_equal(info.MICROGRAMS_PER_GRAM, 1000000)
nt.assert_equal(info.MILLIGRAMS_PER_GRAM, 1000)
def test_POC_dicts():
for poc in info.POC_dicts:
expected_keys = sorted([
'cvcname', 'bmpname', 'nsqdname',
'conc_units', 'load_units', 'load_factor',
'group', 'include'
])
keys = sorted(list(poc.keys()))
nt.assert_list_equal(keys, expected_keys)
nt.assert_true(poc['group'] in ['A', 'B'])
nt.assert_true(poc['include'] in [True, False])
nt.assert_true(poc['conc_units']['plain'] in ['ug/L', 'mg/L', 'CFU/100 mL'])
def test_getPOCs():
nt.assert_true(isinstance(info.getPOCs(), list))
def test_getPOCInfo():
nt.assert_equal(
info.getPOCInfo('nsqdname', 'Copper', 'cvcname'),
'Copper (Cu)'
)
@nt.raises(ValueError)
def test_getPOCInfo_non_unique_result():
info.getPOCInfo('group', 'A', 'cvcname')
def test_wqstd_template():
std = info.wqstd_template()
nt.assert_list_equal(std.columns.tolist(), ['parameter', 'units', 'season', 'influent median'])
expected_shape = (16*4, 4) #(POCs x seasons, cols)
nt.assert_tuple_equal(std.shape, expected_shape)
| bsd-3-clause |
hrjn/scikit-learn | examples/applications/plot_out_of_core_classification.py | 51 | 13651 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the feature space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
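# Quick illustration (comment sketch, not executed by this example): fed an
# iterable of raw SGML byte chunks, ``parse`` yields one dict per <REUTERS>
# record with 'title', 'body' and 'topics' keys, e.g.
#
#     sgml = [b"<REUTERS><TOPICS><D>acq</D></TOPICS>"
#             b"<TITLE>t</TITLE><BODY>some text</BODY></REUTERS>"]
#     next(ReutersParser().parse(sgml))
#     # -> {'title': 't', 'body': 'some text', 'topics': ['acq']}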
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
    Note: size is counted before excluding invalid docs, i.e. documents with
    no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
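# Minimal illustration of the contract above, using hypothetical in-memory
# documents rather than the Reuters stream (not part of the original example):
# documents without topics are dropped, and y flags whether the positive class
# ('acq' by default) appears among the remaining documents' topics.
_demo_docs = [
    {'title': 'a', 'body': 'b', 'topics': ['acq']},
    {'title': 'c', 'body': 'd', 'topics': []},       # dropped: no topics
    {'title': 'e', 'body': 'f', 'topics': ['earn']},
]
_demo_X_text, _demo_y = get_minibatch(iter(_demo_docs), 3)
assert _demo_y.tolist() == [1, 0]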
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the iterator that yields mini-batches of parsed Reuters documents
# from the stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/stats/_discrete_distns.py | 5 | 22392 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, logsumexp, betaln, gammaln as gamln
from scipy._lib._numpy_compat import broadcast_to
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
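# Quick, self-contained sanity check of the pmf formula quoted in the
# docstring above (illustrative values only; guarded by __main__ so that
# importing this module stays side-effect free).
if __name__ == '__main__':
    _n, _p, _k = 10, 0.3, 4
    _direct = (exp(gamln(_n + 1) - gamln(_k + 1) - gamln(_n - _k + 1))
               * _p**_k * (1 - _p)**(_n - _k))
    assert np.allclose(binom.pmf(_k, _n, _p), _direct)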
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Negative binomial distribution describes a sequence of i.i.d. Bernoulli
trials, repeated until a predefined, non-random number of successes occurs.
The probability mass function of the number of failures for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters where n is the number of
successes, whereas p is the probability of a single success.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
r"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
    `M` is the total number of objects, `n` is the total number of Type I objects.
The random variate represents the number of Type I objects in `N` drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}{\binom{M}{N}}
for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
coefficients are defined as,
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = np.maximum(N-(M-n), 0)
self.b = np.minimum(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return betaln(good+1, 1) + betaln(bad+1,1) + betaln(tot-N+1, N+1)\
- betaln(k+1, good-k+1) - betaln(N-k+1,bad-N+k+1)\
- betaln(tot+1, 1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
        # therefore unpack all input args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / special.log1p(-p)
def _stats(self, p):
r = special.log1p(-p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
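        # The support depends on the sign of lambda_: [0, inf) when lambda_ > 0
        # and (-inf, 0] when lambda_ < 0; lambda_ == 0 is rejected below.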
self.a = np.where(lambda_ > 0, 0, -np.inf)
self.b = np.where(lambda_ > 0, np.inf, 0)
return lambda_ != 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _sf(self, x, lambda_):
return np.exp(self._logsf(x, lambda_))
def _logsf(self, x, lambda_):
k = floor(x)
return -lambda_*(k+1)
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high):
"""An array of *size* random integers >= ``low`` and < ``high``."""
if self._size is not None:
# Numpy's RandomState.randint() doesn't broadcast its arguments.
# Use `broadcast_to()` to extend the shapes of low and high
# up to self._size. Then we can use the numpy.vectorize'd
# randint without needing to pass it a `size` argument.
low = broadcast_to(low, self._size)
high = broadcast_to(high, self._size)
randint = np.vectorize(self._random_state.randint, otypes=[np.int_])
return randint(low, high)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| mit |
chenyyx/scikit-learn-doc-zh | examples/zh/applications/plot_tomography_l1_reconstruction.py | 27 | 5478 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired; it is therefore
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, which have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
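# Tiny worked check of the helper above (illustrative value only, not part of
# the original example): a point at x = 0.25 splits its mass between bins 0
# and 1 with linear-interpolation weights (0.75, 0.25).
_demo_inds, _demo_w = _weights(np.array([0.25]))
assert np.allclose(_demo_inds, [0.0, 1.0]) and np.allclose(_demo_w, [0.75, 0.25])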
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2.) ** 2 + (y - l / 2.) ** 2 < (l / 2.) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| gpl-3.0 |
stkubr/zipline | zipline/examples/dual_ema_talib.py | 3 | 3149 | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual Moving Average Crossover algorithm.
This algorithm buys Apple (AAPL) once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
# Import exponential moving average from talib wrapper
from zipline.transforms.ta import EMA
def initialize(context):
context.security = symbol('AAPL')
# Add 2 mavg transforms, one with a long window, one with a short window.
context.short_ema_trans = EMA(timeperiod=20)
context.long_ema_trans = EMA(timeperiod=40)
# To keep track of whether we invested in the stock or not
context.invested = False
def handle_data(context, data):
short_ema = context.short_ema_trans.handle_data(data)
long_ema = context.long_ema_trans.handle_data(data)
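    # The EMA transforms yield None (checked below) until enough bars have
    # accumulated to fill their window, so skip those warm-up events.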
if short_ema is None or long_ema is None:
return
buy = False
sell = False
if (short_ema > long_ema).all() and not context.invested:
order(context.security, 100)
context.invested = True
buy = True
elif (short_ema < long_ema).all() and context.invested:
order(context.security, -100)
context.invested = False
sell = True
record(AAPL=data[context.security].price,
short_ema=short_ema[context.security],
long_ema=long_ema[context.security],
buy=buy,
sell=sell)
if __name__ == '__main__':
from datetime import datetime
import matplotlib.pyplot as plt
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.api import order, record, symbol
from zipline.utils.factory import load_from_yahoo
start = datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2014, 11, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(data).dropna()
fig = plt.figure()
ax1 = fig.add_subplot(211, ylabel='portfolio value')
results.portfolio_value.plot(ax=ax1)
ax2 = fig.add_subplot(212)
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index, results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
plt.show()
| apache-2.0 |
sanketloke/scikit-learn | examples/ensemble/plot_feature_transformation.py | 115 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear model (logistic regression) to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
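# rf.apply(X) returns, for every sample, the index of the leaf it reaches in
# each tree (an array of shape (n_samples, n_estimators)); one-hot encoding
# those leaf indices yields the sparse embedding described in the module
# docstring, on which the logistic regression is then trained.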
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
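# GradientBoostingClassifier.apply returns an array of shape
# (n_samples, n_estimators, k), where k is the number of trees fit per
# boosting stage (1 for binary classification), hence the [:, :, 0] slice.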
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
deepfield/ibis | ibis/__init__.py | 1 | 4468 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
import sys
from multipledispatch import halt_ordering, restart_ordering
import ibis.config_init
import ibis.util as util
import ibis.expr.api as api
import ibis.expr.types as ir
from ibis.config import options
from ibis.common import IbisError
from ibis.compat import suppress
from ibis.filesystems import HDFS, WebHDFS
# __all__ is defined
from ibis.expr.api import *
# speeds up signature registration
halt_ordering()
# pandas backend is mandatory
import ibis.pandas.api as pandas
with suppress(ImportError):
# pip install ibis-framework[csv]
import ibis.file.csv as csv
with suppress(ImportError):
# pip install ibis-framework[parquet]
import ibis.file.parquet as parquet
with suppress(ImportError):
# pip install ibis-framework[hdf5]
import ibis.file.hdf5 as hdf5
with suppress(ImportError):
# pip install ibis-framework[impala]
import ibis.impala.api as impala
with suppress(ImportError):
# pip install ibis-framework[sqlite]
import ibis.sql.sqlite.api as sqlite
with suppress(ImportError):
# pip install ibis-framework[postgres]
import ibis.sql.postgres.api as postgres
with suppress(ImportError):
# pip install ibis-framework[mysql]
import ibis.sql.mysql.api as mysql
with suppress(ImportError):
# pip install ibis-framework[clickhouse]
import ibis.clickhouse.api as clickhouse
with suppress(ImportError):
# pip install ibis-framework[bigquery]
import ibis.bigquery.api as bigquery
with suppress(ImportError):
# pip install ibis-framework[mapd]
if sys.version_info.major < 3:
raise ImportError('The MapD backend is not supported under Python 2.')
import ibis.mapd.api as mapd
restart_ordering()
def hdfs_connect(host='localhost', port=50070, protocol='webhdfs',
use_https='default', auth_mechanism='NOSASL',
verify=True, **kwds):
"""
Connect to HDFS
Parameters
----------
host : string, Host name of the HDFS NameNode
port : int, NameNode's WebHDFS port (default 50070)
protocol : {'webhdfs'}
use_https : boolean, default 'default'
Connect to WebHDFS with HTTPS, otherwise plain HTTP. For secure
authentication, the default for this is True, otherwise False
auth_mechanism : string, Set to NOSASL or PLAIN for non-secure clusters.
Set to GSSAPI or LDAP for Kerberos-secured clusters.
verify : boolean, Set to False to turn off verifying SSL certificates.
(default True)
Other keywords are forwarded to hdfs library classes
Returns
-------
client : WebHDFS
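    Examples
    --------
    Hypothetical host name shown; connecting requires a reachable WebHDFS
    endpoint::
        hdfs = hdfs_connect(host='namenode.example.com', port=50070,
                            auth_mechanism='GSSAPI', use_https=True)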
"""
import requests
session = kwds.setdefault('session', requests.Session())
session.verify = verify
if auth_mechanism in ['GSSAPI', 'LDAP']:
if use_https == 'default':
prefix = 'https'
else:
prefix = 'https' if use_https else 'http'
try:
import requests_kerberos
except ImportError:
raise IbisError(
"Unable to import requests-kerberos, which is required for "
"Kerberos HDFS support. Install it by executing `pip install "
"requests-kerberos` or `pip install hdfs[kerberos]`.")
from hdfs.ext.kerberos import KerberosClient
# note SSL
url = '{0}://{1}:{2}'.format(prefix, host, port)
kwds.setdefault('mutual_auth', 'OPTIONAL')
hdfs_client = KerberosClient(url, **kwds)
else:
if use_https == 'default':
prefix = 'http'
else:
prefix = 'https' if use_https else 'http'
from hdfs.client import InsecureClient
url = '{0}://{1}:{2}'.format(prefix, host, port)
hdfs_client = InsecureClient(url, **kwds)
return WebHDFS(hdfs_client)
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| apache-2.0 |
numeristical/introspective | docs/conf.py | 1 | 10191 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ML Insights documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 9 13:32:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../ml_insights'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
import sphinx_rtd_theme
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ML Insights'
copyright = '2020, Brian Lucena and Ramesh Sampath'
author = 'Brian Lucena and Ramesh Sampath'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'ML Insights v0.0.2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'MLInsightsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MLInsights.tex', 'ML Insights Documentation',
'Brian Lucena and Ramesh Sampath', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mlinsights', 'ML Insights Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# mock imports
autodoc_mock_imports = ["sklearn"]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MLInsights', 'ML Insights Documentation',
author, 'MLInsights', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| mit |
Event38/MissionPlanner | Lib/site-packages/numpy/lib/polynomial.py | 58 | 35930 | """
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
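    # A square 2-D input is treated as a matrix: its eigenvalues are the roots
    # of its characteristic polynomial (see the Notes section above).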
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
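# Editor's sketch (hypothetical helper, illustration only): ``roots`` above
# finds the eigenvalues of the companion matrix whose first row is -p[1:]/p[0]
# and whose first subdiagonal is ones. This builds that matrix as plain nested
# lists for inspection; the eigenvalue step is left to ``eigvals``.
def _companion_sketch(p):
    n = len(p) - 1
    comp = [[0.0] * n for _ in range(n)]
    for i in range(n):
        comp[0][i] = -p[i + 1] / float(p[0])   # first row: -p[1:]/p[0]
    for i in range(1, n):
        comp[i][i - 1] = 1.0                   # ones on the first subdiagonal
    return comp
# _companion_sketch([3.2, 2, 1]) -> [[-0.625, -0.3125], [1.0, 0.0]], whose
# eigenvalues are the roots shown in the docstring example.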
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
        Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
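# Editor's sketch (hypothetical helper): one antiderivative step for
# highest-degree-first coefficients, mirroring the division by
# arange(len(p), 0, -1) above; ``k0`` is the integration constant appended as
# the new constant term.
def _polyint_once_sketch(p, k0=0.0):
    n = len(p)
    return [p[i] / float(n - i) for i in range(n)] + [k0]
# _polyint_once_sketch([1, 1, 1]) -> [0.333..., 0.5, 1.0, 0.0], matching the
# poly1d([ 0.33333333, 0.5, 1., 0.]) example in the docstring.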
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
    Higher-order derivatives can be taken by passing `m`; the fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
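# Editor's sketch (hypothetical helper): a single differentiation step for
# highest-degree-first coefficients, equivalent to the
# ``p[:-1] * NX.arange(n, 0, -1)`` line above.
def _polyder_once_sketch(p):
    n = len(p) - 1
    return [p[i] * (n - i) for i in range(n)]
# _polyder_once_sketch([1, 1, 1, 1]) -> [3, 2, 1], as in the docstring.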
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
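# Editor's sketch (hypothetical helper, illustration only): at its core,
# ``polyfit`` solves the Vandermonde system V * c ~= y by least squares.
# This stripped-down version reuses the module-level ``vander``, ``lstsq`` and
# ``finfo`` already used above, and omits the x-scaling and rank-deficiency
# warning of the real implementation.
def _polyfit_sketch(x, y, deg):
    x = NX.asarray(x) + 0.0                   # force float, as above
    y = NX.asarray(y) + 0.0
    V = vander(x, deg + 1)                    # columns: x**deg, ..., x, 1
    c, resids, rank, s = lstsq(V, y, len(x) * finfo(x.dtype).eps)
    return c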
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
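# Editor's sketch (hypothetical helper): the loop in ``polyval`` above is
# Horner's scheme; written out for a scalar ``x`` it is just a running
# multiply-add over the coefficients.
def _horner_sketch(p, x):
    acc = 0.0
    for c in p:
        acc = acc * x + c
    return acc
# _horner_sketch([3, 0, 1], 5) -> 76.0, matching np.polyval([3, 0, 1], 5).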
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
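# Editor's sketch (hypothetical helper): ``polyadd`` above pads the shorter
# coefficient array with leading zeros and adds elementwise; the same idea in
# plain Python.
def _polyadd_sketch(a, b):
    n = max(len(a), len(b))
    a = [0] * (n - len(a)) + list(a)
    b = [0] * (n - len(b)) + list(b)
    return [x + y for x, y in zip(a, b)]
# _polyadd_sketch([1, 2], [9, 5, 4]) -> [9, 6, 6], as in the docstring.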
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
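# Editor's sketch (hypothetical helper): polynomial multiplication is the
# convolution that ``polymul`` above delegates to ``NX.convolve``; spelled
# out, each product a[i]*b[j] contributes to the coefficient at position i+j
# counted from the high end.
def _polymul_sketch(a, b):
    out = [0.0] * (len(a) + len(b) - 1)
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            out[i + j] += ai * bj
    return out
# _polymul_sketch([1, 2, 3], [9, 5, 1]) -> [9.0, 23.0, 38.0, 17.0, 3.0]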
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
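# Editor's sketch (hypothetical helper): ``polydiv`` above is ordinary long
# division on coefficient arrays; this version keeps the same quotient loop
# but skips the dtype promotion and the trimming of leading near-zeros in the
# remainder.
def _polydiv_sketch(u, v):
    u = [float(c) for c in u]
    q = []
    for k in range(len(u) - len(v) + 1):
        d = u[k] / v[0]
        q.append(d)
        for j in range(len(v)):
            u[k + j] -= d * v[j]
    return q, u[len(q):]
# _polydiv_sketch([3., 5., 2.], [2., 1.]) -> ([1.5, 1.75], [0.25]), matching
# the docstring example.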
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
| gpl-3.0 |
murali-munna/scikit-learn | sklearn/neighbors/regression.py | 106 | 10572 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
        different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
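# Editor's sketch (hypothetical helper, not part of scikit-learn's API): for
# weights='distance', the prediction above reduces, per query point, to an
# inverse-distance weighted mean of the neighbors' targets. This restatement
# assumes strictly positive distances; zero-distance (exact-match) handling is
# left to the library.
def _weighted_neighbor_mean_sketch(neigh_targets, neigh_dists):
    weights = [1.0 / d for d in neigh_dists]
    num = sum(w * t for w, t in zip(weights, neigh_targets))
    return num / sum(weights)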
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
DanielTakeshi/rl_algorithms | bc/plot_bc.py | 1 | 7597 | """
(c) April 2017 by Daniel Seita
Code for plotting behavioral cloning. No need to use command line arguments,
just run `python plot_bc.py`. Easy! Right now it generates two figures per
environment, one with validation set losses and the other with returns. The
latter is probably more interesting.
"""
import argparse
import gym
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import sys
np.set_printoptions(edgeitems=100, linewidth=100, suppress=True)
# Some matplotlib settings.
plt.style.use('seaborn-darkgrid')
error_region_alpha = 0.25
LOGDIR = 'logs/'
FIGDIR = 'figures/'
title_size = 22
tick_size = 17
legend_size = 17
ysize = 18
xsize = 18
lw = 3
ms = 8
colors = ['red', 'blue', 'yellow', 'black']
def plot_bc_modern(edir):
""" Plot the results for this particular environment. """
subdirs = os.listdir(LOGDIR+edir)
print("plotting subdirs {}".format(subdirs))
# Make it easy to count how many of each numrollouts we have.
R_TO_COUNT = {'4':0, '11':0, '18':0, '25':0}
R_TO_IJ = {'4':(0,2), '11':(1,0), '18':(1,1), '25':(1,2)}
fig,axarr = plt.subplots(2, 3, figsize=(24,15))
axarr[0,2].set_title(edir+", Returns, 4 Rollouts", fontsize=title_size)
axarr[1,0].set_title(edir+", Returns, 11 Rollouts", fontsize=title_size)
axarr[1,1].set_title(edir+", Returns, 18 Rollouts", fontsize=title_size)
axarr[1,2].set_title(edir+", Returns, 25 Rollouts", fontsize=title_size)
# Don't forget to plot the expert performance!
exp04 = np.mean(np.load("expert_data/"+edir+"_004.npy")[()]['returns'])
exp11 = np.mean(np.load("expert_data/"+edir+"_011.npy")[()]['returns'])
exp18 = np.mean(np.load("expert_data/"+edir+"_018.npy")[()]['returns'])
axarr[0,2].axhline(y=exp04, color='brown', lw=lw, linestyle='--', label='expert')
axarr[1,0].axhline(y=exp11, color='brown', lw=lw, linestyle='--', label='expert')
axarr[1,1].axhline(y=exp18, color='brown', lw=lw, linestyle='--', label='expert')
if 'Reacher' not in edir:
exp25 = np.mean(np.load("expert_data/"+edir+"_025.npy")[()]['returns'])
axarr[1,2].axhline(y=exp25, color='brown', lw=lw, linestyle='--', label='expert')
for dd in subdirs:
ddsplit = dd.split("_") # `dd` is of the form `numroll_X_seed_Y`
numroll, seed = ddsplit[1], ddsplit[3]
xcoord = np.load(LOGDIR+edir+"/"+dd+"/iters.npy")
tr_loss = np.load(LOGDIR+edir+"/"+dd+"/tr_loss.npy")
val_loss = np.load(LOGDIR+edir+"/"+dd+"/val_loss.npy")
returns = np.load(LOGDIR+edir+"/"+dd+"/returns.npy")
mean_ret = np.mean(returns, axis=1)
std_ret = np.std(returns, axis=1)
# Playing with dictionaries
ijcoord = R_TO_IJ[numroll]
cc = colors[ R_TO_COUNT[numroll] ]
R_TO_COUNT[numroll] += 1
axarr[ijcoord].plot(xcoord, mean_ret, lw=lw, color=cc, label=dd)
axarr[ijcoord].fill_between(xcoord,
mean_ret-std_ret,
mean_ret+std_ret,
alpha=error_region_alpha,
facecolor=cc)
# Cram the training and validation losses on these subplots.
axarr[0,0].plot(xcoord, tr_loss, lw=lw, label=dd)
axarr[0,1].plot(xcoord, val_loss, lw=lw, label=dd)
boring_stuff(axarr, edir)
plt.tight_layout()
plt.savefig(FIGDIR+edir+".png")
def plot_bc_humanoid(edir):
""" Plots humanoid. The argument here is kind of redundant... also, I guess
we'll have to ignore one of the plots here since Humanoid will have 5
subplots. Yeah, it's a bit awkward.
"""
assert edir == "Humanoid-v1"
subdirs = os.listdir(LOGDIR+edir)
print("plotting subdirs {}".format(subdirs))
# Make it easy to count how many of each numrollouts we have.
R_TO_COUNT = {'80':0, '160':0, '240':0}
R_TO_IJ = {'80':(1,0), '160':(1,1), '240':(1,2)}
fig,axarr = plt.subplots(2, 3, figsize=(24,15))
axarr[0,2].set_title("Empty Plot", fontsize=title_size)
axarr[1,0].set_title(edir+", Returns, 80 Rollouts", fontsize=title_size)
axarr[1,1].set_title(edir+", Returns, 160 Rollouts", fontsize=title_size)
axarr[1,2].set_title(edir+", Returns, 240 Rollouts", fontsize=title_size)
# Plot expert performance (um, this takes a while...).
exp080 = np.mean(np.load("expert_data/"+edir+"_080.npy")[()]['returns'])
exp160 = np.mean(np.load("expert_data/"+edir+"_160.npy")[()]['returns'])
exp240 = np.mean(np.load("expert_data/"+edir+"_240.npy")[()]['returns'])
axarr[1,0].axhline(y=exp080, color='brown', lw=lw, linestyle='--', label='expert')
axarr[1,1].axhline(y=exp160, color='brown', lw=lw, linestyle='--', label='expert')
axarr[1,2].axhline(y=exp240, color='brown', lw=lw, linestyle='--', label='expert')
for dd in subdirs:
ddsplit = dd.split("_") # `dd` is of the form `numroll_X_seed_Y`
numroll, seed = ddsplit[1], ddsplit[3]
xcoord = np.load(LOGDIR+edir+"/"+dd+"/iters.npy")
tr_loss = np.load(LOGDIR+edir+"/"+dd+"/tr_loss.npy")
val_loss = np.load(LOGDIR+edir+"/"+dd+"/val_loss.npy")
returns = np.load(LOGDIR+edir+"/"+dd+"/returns.npy")
mean_ret = np.mean(returns, axis=1)
std_ret = np.std(returns, axis=1)
# Playing with dictionaries
ijcoord = R_TO_IJ[numroll]
cc = colors[ R_TO_COUNT[numroll] ]
R_TO_COUNT[numroll] += 1
axarr[ijcoord].plot(xcoord, mean_ret, lw=lw, color=cc, label=dd)
axarr[ijcoord].fill_between(xcoord,
mean_ret-std_ret,
mean_ret+std_ret,
alpha=error_region_alpha,
facecolor=cc)
# Cram the training and validation losses on these subplots.
axarr[0,0].plot(xcoord, tr_loss, lw=lw, label=dd)
axarr[0,1].plot(xcoord, val_loss, lw=lw, label=dd)
boring_stuff(axarr, edir)
plt.tight_layout()
plt.savefig(FIGDIR+edir+".png")
def boring_stuff(axarr, edir):
""" Axes, titles, legends, etc. Yeah yeah ... """
for i in range(2):
for j in range(3):
if i == 0 and j == 0:
axarr[i,j].set_ylabel("Loss Training MBs", fontsize=ysize)
            elif i == 0 and j == 1:
axarr[i,j].set_ylabel("Loss Validation Set", fontsize=ysize)
else:
axarr[i,j].set_ylabel("Average Return", fontsize=ysize)
axarr[i,j].set_xlabel("Training Minibatches", fontsize=xsize)
axarr[i,j].tick_params(axis='x', labelsize=tick_size)
axarr[i,j].tick_params(axis='y', labelsize=tick_size)
            axarr[i,j].legend(loc="best", prop={'size':legend_size})
axarr[0,0].set_title(edir+", Training Losses", fontsize=title_size)
axarr[0,1].set_title(edir+", Validation Losses", fontsize=title_size)
axarr[0,0].set_yscale('log')
axarr[0,1].set_yscale('log')
def plot_bc(e):
""" Split into cases. It makes things easier for me. """
env_to_method = {'Ant-v1': plot_bc_modern,
'HalfCheetah-v1': plot_bc_modern,
'Hopper-v1': plot_bc_modern,
'Walker2d-v1': plot_bc_modern,
'Reacher-v1': plot_bc_modern,
'Humanoid-v1': plot_bc_humanoid}
env_to_method[e](e)
if __name__ == "__main__":
env_dirs = [e for e in os.listdir(LOGDIR) if "text" not in e]
print("Plotting with one figure per env_dirs = {}".format(env_dirs))
for e in env_dirs:
plot_bc(e)
| mit |
GraphProcessor/CommunityDetectionCodes | Prensentation/algorithms/clique_percolation/conradlee_clique_percolation.py | 1 | 4027 | from collections import defaultdict
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib.pyplot as plt
def get_percolated_cliques(G, k):
def get_percolation_graph():
percolation_graph = nx.Graph()
cliques = [frozenset(c) for c in nx.find_cliques(G) if len(c) >= k]
print 'first max cliques:', cliques
percolation_graph.add_nodes_from(cliques)
# First index which nodes are in which cliques
membership_dict = defaultdict(list)
for clique in cliques:
for node in clique:
membership_dict[node].append(clique)
# For each clique, see which adjacent cliques percolate
for clique in cliques:
def get_adjacent_cliques(clique, membership_dict):
adjacent_cliques = set()
for n in clique:
for adj_clique in membership_dict[n]:
if clique != adj_clique:
adjacent_cliques.add(adj_clique)
return adjacent_cliques
for adj_clique in get_adjacent_cliques(clique, membership_dict):
if len(clique.intersection(adj_clique)) >= (k - 1):
percolation_graph.add_edge(clique, adj_clique)
print '\npercolation graph nodes:', percolation_graph.nodes()
print 'percolation graph edges:', percolation_graph.edges()
return percolation_graph
percolation_graph = get_percolation_graph()
pos = nx.circular_layout(percolation_graph)
comm_dict = {}
# nx.draw(percolation_graph, , with_labels=True, node_size=500)
#
# Connected components of clique graph with perc edges
# are the percolated cliques
for idx, component in enumerate(nx.connected_components(percolation_graph)):
print idx, component
comm_dict[idx] = list(component)
yield (frozenset.union(*component))
color_list = ['r', 'g', 'b', 'y']
# nodes
for comm_id in comm_dict:
nx.draw_networkx_nodes(percolation_graph, pos, nodelist=comm_dict[comm_id],
node_color=color_list[comm_id], node_size=3000, alpha=0.4, node_shape='h', )
node_label_dict = {}
for node in percolation_graph.nodes():
node_label_dict[node] = str(sorted(list(node)))
nx.draw_networkx_labels(percolation_graph, pos, labels=node_label_dict, font_size=20)
edge_label_dict = {}
for edge in percolation_graph.edges():
edge_label_dict[edge] = str(list(frozenset.intersection(edge[0], edge[1])))
plt.axis('off')
plt.savefig('./percolation_graph.pdf', bbox_inches='tight', pad_inches=0, transparent=True)
plt.savefig('./percolation_graph.png', bbox_inches='tight', pad_inches=0, transparent=True)
plt.show()
def draw_with_edges():
for comm_id in comm_dict:
nx.draw_networkx_nodes(percolation_graph, pos, nodelist=comm_dict[comm_id],
node_color=color_list[comm_id], node_size=3000, alpha=0.4, node_shape='h', )
node_label_dict = {}
for node in percolation_graph.nodes():
node_label_dict[node] = str(sorted(list(node)))
nx.draw_networkx_labels(percolation_graph, pos, labels=node_label_dict, font_size=20)
nx.draw_networkx_edges(percolation_graph, pos, width=2, edge_color='grey')
nx.draw_networkx_edge_labels(percolation_graph, pos, edge_labels=edge_label_dict, font_size=20)
plt.axis('off')
plt.savefig('./percolation_graph_with_edges.pdf', bbox_inches='tight', pad_inches=0, transparent=True)
plt.savefig('./percolation_graph_with_edges.png', bbox_inches='tight', pad_inches=0, transparent=True)
plt.show()
draw_with_edges()
if __name__ == '__main__':
# data_graph = nx.karate_club_graph()
data_graph = nx.read_edgelist('example_edge_list.txt', nodetype=int)
comm_list = list(get_percolated_cliques(data_graph, 3))
print '\ncomm list:', comm_list
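# Editor's sketch (hypothetical helper, illustration only): the percolation
# rule used above -- two k-cliques belong to the same community when they
# share at least k-1 nodes. This helper just restates that test for a pair of
# cliques.
def _cliques_percolate_sketch(clique_a, clique_b, k):
    return len(set(clique_a).intersection(clique_b)) >= k - 1
# _cliques_percolate_sketch([1, 2, 3], [2, 3, 4], 3) -> True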
| gpl-2.0 |
thomasaarholt/hyperspy | hyperspy/drawing/_markers/horizontal_line_segment.py | 4 | 3559 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class HorizontalLineSegment(MarkerBase):
"""Horizontal line segment marker that can be added to the signal figure
Parameters
----------
x1 : array or float
The position of the start of the line segment in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
have the same dimensions in the navigation axes.
x2 : array or float
The position of the end of the line segment in x.
see x1 arguments
y : array or float
The position of line segment in y.
see x1 arguments
kwargs :
Keywords argument of axvline valid properties (i.e. recognized by
mpl.plot).
Example
-------
>>> im = hs.signals.Signal2D(np.zeros((100, 100)))
>>> m = hs.plot.markers.horizontal_line_segment(
>>> x1=20, x2=70, y=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
Adding a marker permanently to a signal
>>> im = hs.signals.Signal2D(np.zeros((100, 100)))
>>> m = hs.plot.markers.horizontal_line_segment(
>>> x1=10, x2=30, y=42, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m, permanent=True)
"""
def __init__(self, x1, x2, y, **kwargs):
MarkerBase.__init__(self)
lp = {'color': 'black', 'linewidth': 1}
self.marker_properties = lp
self.set_data(x1=x1, x2=x2, y1=y)
self.set_marker_properties(**kwargs)
self.name = 'horizontal_line_segment'
def __repr__(self):
string = "<marker.{}, {} (x1={},x2={},y={},color={})>".format(
self.__class__.__name__,
self.name,
self.get_data_position('x1'),
self.get_data_position('x2'),
self.get_data_position('y1'),
self.marker_properties['color'],
)
return(string)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def _plot_marker(self):
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
def _update_segment(self):
segments = self.marker.get_segments()
segments[0][0, 1] = self.get_data_position('y1')
segments[0][1, 1] = segments[0][0, 1]
if self.get_data_position('x1') is None:
segments[0][0, 0] = plt.getp(self.marker.axes, 'xlim')[0]
else:
segments[0][0, 0] = self.get_data_position('x1')
if self.get_data_position('x2') is None:
segments[0][1, 0] = plt.getp(self.marker.axes, 'xlim')[1]
else:
segments[0][1, 0] = self.get_data_position('x2')
self.marker.set_segments(segments)
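# Editor's note (illustrative sketch, not part of HyperSpy's API): a
# LineCollection segment is a sequence of (x, y) points; the horizontal
# segment maintained by _update_segment above is simply the pair of endpoints
# sharing the same y value.
def _horizontal_segment_sketch(x1, x2, y):
    # The two endpoints written into the matplotlib LineCollection.
    return [[x1, y], [x2, y]]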
| gpl-3.0 |
teonlamont/mne-python | mne/label.py | 2 | 86926 | # Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
from collections import defaultdict
from colorsys import hsv_to_rgb, rgb_to_hsv
from os import path as op
import os
import copy as cp
import re
import numpy as np
from scipy import linalg, sparse
from .utils import get_subjects_dir, _check_subject, logger, verbose, warn,\
check_random_state
from .source_estimate import (morph_data, SourceEstimate, _center_of_mass,
spatial_src_connectivity)
from .source_space import add_source_space_distances
from .surface import read_surface, fast_cross_3d, mesh_edges, mesh_dist
from .source_space import SourceSpaces
from .parallel import parallel_func, check_n_jobs
from .stats.cluster_level import _find_clusters, _get_components
from .externals.six import b, string_types
from .externals.six.moves import zip, xrange
def _blend_colors(color_1, color_2):
"""Blend two colors in HSV space.
Parameters
----------
color_1, color_2 : None | tuple
RGBA tuples with values between 0 and 1. None if no color is available.
If both colors are None, the output is None. If only one is None, the
output is the other color.
Returns
-------
color : None | tuple
RGBA tuple of the combined color. Saturation, value and alpha are
averaged, whereas the new hue is determined as angle half way between
the two input colors' hues.
"""
if color_1 is None and color_2 is None:
return None
elif color_1 is None:
return color_2
elif color_2 is None:
return color_1
r_1, g_1, b_1, a_1 = color_1
h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1)
r_2, g_2, b_2, a_2 = color_2
h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2)
hue_diff = abs(h_1 - h_2)
if hue_diff < 0.5:
h = min(h_1, h_2) + hue_diff / 2.
else:
h = max(h_1, h_2) + (1. - hue_diff) / 2.
h %= 1.
s = (s_1 + s_2) / 2.
v = (v_1 + v_2) / 2.
r, g, b = hsv_to_rgb(h, s, v)
a = (a_1 + a_2) / 2.
color = (r, g, b, a)
return color
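# Editor's sketch (hypothetical helper): the hue combination above picks the
# angle halfway between the two hues along the shorter arc of the hue circle,
# which is why the wrap-around case is handled separately.
def _mean_hue_sketch(h_1, h_2):
    diff = abs(h_1 - h_2)
    if diff < 0.5:
        return (min(h_1, h_2) + diff / 2.) % 1.
    return (max(h_1, h_2) + (1. - diff) / 2.) % 1.
# _mean_hue_sketch(0.9, 0.1) -> 0.0 (wraps around rather than returning 0.5)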
def _split_colors(color, n):
"""Create n colors in HSV space that occupy a gradient in value.
Parameters
----------
color : tuple
RGBA tuple with values between 0 and 1.
n : int >= 2
Number of colors on the gradient.
Returns
-------
colors : tuple of tuples, len = n
N RGBA tuples that occupy a gradient in value (low to high) but share
saturation and hue with the input color.
"""
r, g, b, a = color
h, s, v = rgb_to_hsv(r, g, b)
gradient_range = np.sqrt(n / 10.)
if v > 0.5:
v_max = min(0.95, v + gradient_range / 2)
v_min = max(0.05, v_max - gradient_range)
else:
v_min = max(0.05, v - gradient_range / 2)
v_max = min(0.95, v_min + gradient_range)
hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n))
rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors)
rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors)
return tuple(rgba_colors)
def _n_colors(n, bytes_=False, cmap='hsv'):
"""Produce a list of n unique RGBA color tuples based on a colormap.
Parameters
----------
n : int
Number of colors.
bytes : bool
Return colors as integers values between 0 and 255 (instead of floats
between 0 and 1).
cmap : str
Which colormap to use.
Returns
-------
colors : array, shape (n, 4)
RGBA color values.
"""
n_max = 2 ** 10
if n > n_max:
raise NotImplementedError("Can't produce more than %i unique "
"colors" % n_max)
from matplotlib.cm import get_cmap
cm = get_cmap(cmap, n_max)
pos = np.linspace(0, 1, n, False)
colors = cm(pos, bytes=bytes_)
if bytes_:
# make sure colors are unique
for ii, c in enumerate(colors):
if np.any(np.all(colors[:ii] == c, 1)):
raise RuntimeError('Could not get %d unique colors from %s '
'colormap. Try using a different colormap.'
% (n, cmap))
return colors
class Label(object):
"""A FreeSurfer/MNE label with vertices restricted to one hemisphere.
Labels can be combined with the ``+`` operator:
* Duplicate vertices are removed.
* If duplicate vertices have conflicting position values, an error
is raised.
* Values of duplicate vertices are summed.
Parameters
----------
vertices : array (length N)
vertex indices (0 based).
pos : array (N by 3) | None
locations in meters. If None, then zeros are used.
values : array (length N) | None
values at the vertices. If None, then ones are used.
hemi : 'lh' | 'rh'
Hemisphere to which the label applies.
comment : str
Kept as information but not used by the object itself.
name : str
Kept as information but not used by the object itself.
filename : str
Kept as information but not used by the object itself.
subject : str | None
Name of the subject the label is from.
color : None | matplotlib color
Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
color : None | tuple
Default label color, represented as RGBA tuple with values between 0
and 1.
comment : str
Comment from the first line of the label file.
hemi : 'lh' | 'rh'
Hemisphere.
name : None | str
A name for the label. It is OK to change that attribute manually.
pos : array, shape = (n_pos, 3)
Locations in meters.
subject : str | None
Subject name. It is best practice to set this to the proper
value on initialization, but it can also be set manually.
values : array, len = n_pos
Values at the vertices.
verbose : bool, str, int, or None
See above.
vertices : array, len = n_pos
Vertex indices (0 based)
"""
@verbose
def __init__(self, vertices, pos=None, values=None, hemi=None, comment="",
name=None, filename=None, subject=None, color=None,
verbose=None): # noqa: D102
# check parameters
if not isinstance(hemi, string_types):
raise ValueError('hemi must be a string, not %s' % type(hemi))
vertices = np.asarray(vertices, int)
if np.any(np.diff(vertices.astype(int)) <= 0):
raise ValueError('Vertices must be ordered in increasing order.')
if color is not None:
from matplotlib.colors import colorConverter
color = colorConverter.to_rgba(color)
if values is None:
values = np.ones(len(vertices))
else:
values = np.asarray(values)
if pos is None:
pos = np.zeros((len(vertices), 3))
else:
pos = np.asarray(pos)
if not (len(vertices) == len(values) == len(pos)):
raise ValueError("vertices, values and pos need to have same "
"length (number of vertices)")
# name
if name is None and filename is not None:
name = op.basename(filename[:-6])
self.vertices = vertices
self.pos = pos
self.values = values
self.hemi = hemi
self.comment = comment
self.verbose = verbose
self.subject = _check_subject(None, subject, False)
self.color = color
self.name = name
self.filename = filename
def __setstate__(self, state): # noqa: D105
self.vertices = state['vertices']
self.pos = state['pos']
self.values = state['values']
self.hemi = state['hemi']
self.comment = state['comment']
self.verbose = state['verbose']
self.subject = state.get('subject', None)
self.color = state.get('color', None)
self.name = state['name']
self.filename = state['filename']
def __getstate__(self): # noqa: D105
out = dict(vertices=self.vertices,
pos=self.pos,
values=self.values,
hemi=self.hemi,
comment=self.comment,
verbose=self.verbose,
subject=self.subject,
color=self.color,
name=self.name,
filename=self.filename)
return out
def __repr__(self): # noqa: D105
name = 'unknown, ' if self.subject is None else self.subject + ', '
name += repr(self.name) if self.name is not None else "unnamed"
n_vert = len(self)
return "<Label | %s, %s : %i vertices>" % (name, self.hemi, n_vert)
def __len__(self):
"""Return the number of vertices."""
return len(self.vertices)
def __add__(self, other):
"""Add BiHemiLabels."""
if isinstance(other, BiHemiLabel):
return other + self
elif isinstance(other, Label):
if self.subject != other.subject:
raise ValueError('Label subject parameters must match, got '
'"%s" and "%s". Consider setting the '
'subject parameter on initialization, or '
'setting label.subject manually before '
'combining labels.' % (self.subject,
other.subject))
if self.hemi != other.hemi:
name = '%s + %s' % (self.name, other.name)
if self.hemi == 'lh':
lh, rh = self.copy(), other.copy()
else:
lh, rh = other.copy(), self.copy()
color = _blend_colors(self.color, other.color)
return BiHemiLabel(lh, rh, name, color)
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
# check for overlap
duplicates = np.intersect1d(self.vertices, other.vertices)
n_dup = len(duplicates)
if n_dup:
self_dup = [np.where(self.vertices == d)[0][0]
for d in duplicates]
other_dup = [np.where(other.vertices == d)[0][0]
for d in duplicates]
if not np.all(self.pos[self_dup] == other.pos[other_dup]):
err = ("Labels %r and %r: vertices overlap but differ in "
"position values" % (self.name, other.name))
raise ValueError(err)
isnew = np.array([v not in duplicates for v in other.vertices])
vertices = np.hstack((self.vertices, other.vertices[isnew]))
pos = np.vstack((self.pos, other.pos[isnew]))
# find position of other's vertices in new array
tgt_idx = [np.where(vertices == v)[0][0] for v in other.vertices]
n_self = len(self.values)
n_other = len(other.values)
new_len = n_self + n_other - n_dup
values = np.zeros(new_len, dtype=self.values.dtype)
values[:n_self] += self.values
values[tgt_idx] += other.values
else:
vertices = np.hstack((self.vertices, other.vertices))
pos = np.vstack((self.pos, other.pos))
values = np.hstack((self.values, other.values))
indcs = np.argsort(vertices)
vertices, pos, values = vertices[indcs], pos[indcs, :], values[indcs]
comment = "%s + %s" % (self.comment, other.comment)
name0 = self.name if self.name else 'unnamed'
name1 = other.name if other.name else 'unnamed'
name = "%s + %s" % (name0, name1)
color = _blend_colors(self.color, other.color)
verbose = self.verbose or other.verbose
label = Label(vertices, pos, values, self.hemi, comment, name, None,
self.subject, color, verbose)
return label
def __sub__(self, other):
"""Subtract BiHemiLabels."""
if isinstance(other, BiHemiLabel):
if self.hemi == 'lh':
return self - other.lh
else:
return self - other.rh
elif isinstance(other, Label):
if self.subject != other.subject:
raise ValueError('Label subject parameters must match, got '
'"%s" and "%s". Consider setting the '
'subject parameter on initialization, or '
'setting label.subject manually before '
'combining labels.' % (self.subject,
other.subject))
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
if self.hemi == other.hemi:
keep = np.in1d(self.vertices, other.vertices, True, invert=True)
else:
keep = np.arange(len(self.vertices))
name = "%s - %s" % (self.name or 'unnamed', other.name or 'unnamed')
return Label(self.vertices[keep], self.pos[keep], self.values[keep],
self.hemi, self.comment, name, None, self.subject,
self.color, self.verbose)
def save(self, filename):
r"""Write to disk as FreeSurfer \*.label file.
Parameters
----------
filename : string
Path to label file to produce.
Notes
-----
Note that due to file specification limitations, the Label's subject
and color attributes are not saved to disk.
"""
write_label(filename, self)
def copy(self):
"""Copy the label instance.
Returns
-------
label : instance of Label
The copied label.
"""
return cp.deepcopy(self)
def fill(self, src, name=None):
"""Fill the surface between sources for a source space label.
Parameters
----------
src : SourceSpaces
Source space in which the label was defined. If a source space is
provided, the label is expanded to fill in surface vertices that
lie between the vertices included in the source space. For the
added vertices, ``pos`` is filled in with positions from the
source space, and ``values`` is filled in from the closest source
space vertex.
name : None | str
Name for the new Label (default is self.name).
Returns
-------
label : Label
The label covering the same vertices in source space but also
including intermediate surface vertices.
"""
# find source space patch info
if len(self.vertices) == 0:
return self.copy()
if self.hemi == 'lh':
hemi_src = src[0]
elif self.hemi == 'rh':
hemi_src = src[1]
if not np.all(np.in1d(self.vertices, hemi_src['vertno'])):
msg = "Source space does not contain all of the label's vertices"
raise ValueError(msg)
nearest = hemi_src['nearest']
if nearest is None:
warn("Computing patch info for source space, this can take "
"a while. In order to avoid this in the future, run "
"mne.add_source_space_distances() on the source space "
"and save it.")
add_source_space_distances(src)
nearest = hemi_src['nearest']
# find new vertices
include = np.in1d(nearest, self.vertices, False)
vertices = np.nonzero(include)[0]
# values
nearest_in_label = np.digitize(nearest[vertices], self.vertices, True)
values = self.values[nearest_in_label]
# pos
pos = hemi_src['rr'][vertices]
if name is None:
name = self.name
label = Label(vertices, pos, values, self.hemi, self.comment, name,
None, self.subject, self.color)
return label
@verbose
def smooth(self, subject=None, smooth=2, grade=None,
subjects_dir=None, n_jobs=1, verbose=None):
"""Smooth the label.
Useful for filling in labels made in a
decimated source space for display.
Parameters
----------
subject : str | None
The name of the subject used. If None, the value will be
taken from self.subject.
smooth : int
Number of iterations for the smoothing of the surface data.
Cannot be None here since not all vertices are used. For a
grade of 5 (e.g., fsaverage), a smoothing of 2 will fill a
label.
grade : int, list (of two arrays), array, or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
values will be morphed to the set of vertices specified in grade[0]
and grade[1], assuming that these are vertices for the left and
right hemispheres. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. If one array is used, it is assumed
that all vertices belong to the hemisphere of the label. To create
a label filling the surface, use None.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
n_jobs : int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
Returns
-------
label : instance of Label
The smoothed label.
Notes
-----
This function will set label.pos to be all zeros. If the positions
on the new surface are required, consider using mne.read_surface
with label.vertices.
"""
subject = _check_subject(self.subject, subject)
return self.morph(subject, subject, smooth, grade, subjects_dir,
n_jobs)
@verbose
def morph(self, subject_from=None, subject_to=None, smooth=5, grade=None,
subjects_dir=None, n_jobs=1, verbose=None):
"""Morph the label.
Useful for transforming a label from one subject to another.
Parameters
----------
subject_from : str | None
The name of the subject of the current label. If None, the
initial subject will be taken from self.subject.
subject_to : str
The name of the subject to morph the label to. This will
be put in label.subject of the output label file.
smooth : int
Number of iterations for the smoothing of the surface data.
Cannot be None here since not all vertices are used.
grade : int, list (of two arrays), array, or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
values will be morphed to the set of vertices specified in grade[0]
and grade[1], assuming that these are vertices for the left and
right hemispheres. Note that specifying the vertices (e.g.,
``grade=[np.arange(10242), np.arange(10242)]`` for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. If one array is used, it is assumed
that all vertices belong to the hemisphere of the label. To create
a label filling the surface, use None.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
label : instance of Label
The morphed label.
Notes
-----
This function will set label.pos to be all zeros. If the positions
on the new surface are required, consider using `mne.read_surface`
with `label.vertices`.
"""
subject_from = _check_subject(self.subject, subject_from)
if not isinstance(subject_to, string_types):
raise TypeError('"subject_to" must be entered as a string')
if not isinstance(smooth, int):
raise TypeError('smooth must be an integer')
if np.all(self.values == 0):
raise ValueError('Morphing label with all zero values will result '
'in the label having no vertices. Consider using '
'something like label.values.fill(1.0).')
        if isinstance(grade, np.ndarray):
if self.hemi == 'lh':
grade = [grade, np.array([], int)]
else:
grade = [np.array([], int), grade]
if self.hemi == 'lh':
vertices = [self.vertices, np.array([], int)]
else:
vertices = [np.array([], int), self.vertices]
data = self.values[:, np.newaxis]
stc = SourceEstimate(data, vertices, tmin=1, tstep=1,
subject=subject_from)
stc = morph_data(subject_from, subject_to, stc, grade=grade,
smooth=smooth, subjects_dir=subjects_dir,
warn=False, n_jobs=n_jobs)
inds = np.nonzero(stc.data)[0]
self.values = stc.data[inds, :].ravel()
self.pos = np.zeros((len(inds), 3))
if self.hemi == 'lh':
self.vertices = stc.vertices[0][inds]
else:
self.vertices = stc.vertices[1][inds]
self.subject = subject_to
return self
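    # Illustrative usage sketch (comment only, not part of the library): morphing
    # a label between subjects with the method above. The label file, subject
    # names and SUBJECTS_DIR setup are hypothetical assumptions.
    #
    #   >>> label = read_label('lh.BA1.label', subject='sample')
    #   >>> label.values.fill(1.0)                 # morph needs non-zero values
    #   >>> label_fs = label.morph('sample', 'fsaverage', smooth=5, grade=5)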
def split(self, parts=2, subject=None, subjects_dir=None,
freesurfer=False):
"""Split the Label into two or more parts.
Parameters
----------
parts : int >= 2 | tuple of str | str
Number of labels to create (default is 2), or tuple of strings
specifying label names for new labels (from posterior to anterior),
or 'contiguous' to split the label into connected components.
If a number or 'contiguous' is specified, names of the new labels
will be the input label's name with div1, div2 etc. appended.
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
subjects_dir : None | str
Path to SUBJECTS_DIR if it is not set in the environment.
freesurfer : bool
By default (``False``) ``split_label`` uses an algorithm that is
slightly optimized for performance and numerical precision. Set
``freesurfer`` to ``True`` in order to replicate label splits from
FreeSurfer's ``mris_divide_parcellation``.
Returns
-------
labels : list of Label (len = n_parts)
The labels, starting from the lowest to the highest end of the
projection axis.
Notes
-----
If using 'contiguous' split, you must ensure that the label being split
uses the same triangular resolution as the surface mesh files in
        ``subjects_dir``. Also, some small fringe labels may be returned that
are close (but not connected) to the large components.
The spatial split finds the label's principal eigen-axis on the
spherical surface, projects all label vertex coordinates onto this
axis, and divides them at regular spatial intervals.
"""
if isinstance(parts, string_types) and parts == 'contiguous':
return _split_label_contig(self, subject, subjects_dir)
elif isinstance(parts, (tuple, int)):
return split_label(self, parts, subject, subjects_dir, freesurfer)
else:
raise ValueError("Need integer, tuple of strings, or string "
"('contiguous'). Got %s)" % type(parts))
def get_vertices_used(self, vertices=None):
"""Get the source space's vertices inside the label.
Parameters
----------
vertices : ndarray of int, shape (n_vertices,) | None
The set of vertices to compare the label to. If None, equals to
``np.arange(10242)``. Defaults to None.
Returns
-------
        label_verts : ndarray of int, shape (n_label_vertices,)
            The vertices of the label that are also used by the data.
"""
if vertices is None:
vertices = np.arange(10242)
label_verts = vertices[np.in1d(vertices, self.vertices)]
return label_verts
def get_tris(self, tris, vertices=None):
"""Get the source space's triangles inside the label.
Parameters
----------
tris : ndarray of int, shape (n_tris, 3)
The set of triangles corresponding to the vertices in a
source space.
vertices : ndarray of int, shape (n_vertices,) | None
The set of vertices to compare the label to. If None, equals to
``np.arange(10242)``. Defaults to None.
Returns
-------
label_tris : ndarray of int, shape (n_tris, 3)
The subset of tris used by the label
"""
vertices_ = self.get_vertices_used(vertices)
selection = np.all(np.in1d(tris, vertices_).reshape(tris.shape),
axis=1)
label_tris = tris[selection]
if len(np.unique(label_tris)) < len(vertices_):
logger.info('Surprising label structure. Trying to repair '
'triangles.')
dropped_vertices = np.setdiff1d(vertices_, label_tris)
n_dropped = len(dropped_vertices)
assert n_dropped == (len(vertices_) - len(np.unique(label_tris)))
# put missing vertices as extra zero-length triangles
add_tris = (dropped_vertices +
np.zeros((len(dropped_vertices), 3), dtype=int).T)
label_tris = np.r_[label_tris, add_tris.T]
assert len(np.unique(label_tris)) == len(vertices_)
return label_tris
def center_of_mass(self, subject=None, restrict_vertices=False,
subjects_dir=None, surf='sphere'):
"""Compute the center of mass of the label.
This function computes the spatial center of mass on the surface
as in [1]_.
Parameters
----------
subject : string | None
The subject the label is defined for.
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from the label. Otherwise,
it could be any vertex from surf. If an array of int, the
returned vertex will come from that array. If instance of
SourceSpaces (as of 0.13), the returned vertex will be from
            the given source space. For most accurate estimates, do not
restrict vertices.
subjects_dir : str, or None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by its label value.
See Also
--------
SourceEstimate.center_of_mass
vertex_to_mni
Notes
-----
        .. versionadded:: 0.13
References
----------
.. [1] Larson and Lee, "The cortical dynamics underlying effective
switching of auditory spatial attention", NeuroImage 2012.
"""
if not isinstance(surf, string_types):
raise TypeError('surf must be a string, got %s' % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.values < 0):
raise ValueError('Cannot compute COM with negative values')
if np.all(self.values == 0):
raise ValueError('Cannot compute COM with all values == 0. For '
'structural labels, consider setting to ones via '
'label.values[:] = 1.')
vertex = _center_of_mass(self.vertices, self.values, self.hemi, surf,
subject, subjects_dir, restrict_vertices)
return vertex
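# Illustrative usage sketch (comment only, not part of the library): combining
# two Label objects with the arithmetic operators defined above. The label
# files and subject name are hypothetical assumptions.
#
#   >>> lh_a = read_label('lh.BA1.label', subject='fsaverage')
#   >>> lh_b = read_label('lh.BA2.label', subject='fsaverage')
#   >>> merged = lh_a + lh_b       # union of vertices, values summed on overlap
#   >>> rest = merged - lh_b       # vertices of lh_a that are not in lh_b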
class BiHemiLabel(object):
"""A freesurfer/MNE label with vertices in both hemispheres.
Parameters
----------
lh : Label
Label for the left hemisphere.
rh : Label
Label for the right hemisphere.
name : None | str
        Name for the label.
color : None | matplotlib color
Label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
Note that due to file specification limitations, the color isn't saved
to or loaded from files written to disk.
Attributes
----------
lh : Label
Label for the left hemisphere.
rh : Label
Label for the right hemisphere.
name : None | str
A name for the label. It is OK to change that attribute manually.
subject : str | None
Subject the label is from.
"""
def __init__(self, lh, rh, name=None, color=None): # noqa: D102
if lh.subject != rh.subject:
raise ValueError('lh.subject (%s) and rh.subject (%s) must '
'agree' % (lh.subject, rh.subject))
self.lh = lh
self.rh = rh
self.name = name
self.subject = lh.subject
self.color = color
self.hemi = 'both'
def __repr__(self): # noqa: D105
temp = "<BiHemiLabel | %s, lh : %i vertices, rh : %i vertices>"
name = 'unknown, ' if self.subject is None else self.subject + ', '
name += repr(self.name) if self.name is not None else "unnamed"
return temp % (name, len(self.lh), len(self.rh))
def __len__(self):
"""Return the number of vertices."""
return len(self.lh) + len(self.rh)
def __add__(self, other):
"""Add labels."""
if isinstance(other, Label):
if other.hemi == 'lh':
lh = self.lh + other
rh = self.rh
else:
lh = self.lh
rh = self.rh + other
elif isinstance(other, BiHemiLabel):
lh = self.lh + other.lh
rh = self.rh + other.rh
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
name = '%s + %s' % (self.name, other.name)
color = _blend_colors(self.color, other.color)
return BiHemiLabel(lh, rh, name, color)
def __sub__(self, other):
"""Subtract labels."""
if isinstance(other, Label):
if other.hemi == 'lh':
lh = self.lh - other
rh = self.rh
else:
rh = self.rh - other
lh = self.lh
elif isinstance(other, BiHemiLabel):
lh = self.lh - other.lh
rh = self.rh - other.rh
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
if len(lh.vertices) == 0:
return rh
elif len(rh.vertices) == 0:
return lh
else:
name = '%s - %s' % (self.name, other.name)
return BiHemiLabel(lh, rh, name, self.color)
def read_label(filename, subject=None, color=None):
"""Read FreeSurfer Label file.
Parameters
----------
filename : string
Path to label file.
subject : str | None
Name of the subject the data are defined for.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
color : None | matplotlib color
Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
Note that due to file specification limitations, the color isn't saved
to or loaded from files written to disk.
Returns
-------
label : Label
Instance of Label object with attributes:
- ``comment``: comment from the first line of the label file
- ``vertices``: vertex indices (0 based, column 1)
- ``pos``: locations in meters (columns 2 - 4 divided by 1000)
- ``values``: values at the vertices (column 5)
See Also
--------
read_labels_from_annot
"""
if subject is not None and not isinstance(subject, string_types):
raise TypeError('subject must be a string')
# find hemi
basename = op.basename(filename)
if basename.endswith('lh.label') or basename.startswith('lh.'):
hemi = 'lh'
elif basename.endswith('rh.label') or basename.startswith('rh.'):
hemi = 'rh'
else:
raise ValueError('Cannot find which hemisphere it is. File should end'
' with lh.label or rh.label')
# find name
    if basename.startswith(('lh.', 'rh.')):
        basename_ = basename[3:]
        if basename.endswith('.label'):
            basename_ = basename_[:-6]
    else:
        basename_ = basename[:-9]
name = "%s-%s" % (basename_, hemi)
# read the file
with open(filename, 'r') as fid:
comment = fid.readline().replace('\n', '')[1:]
nv = int(fid.readline())
data = np.empty((5, nv))
for i, line in enumerate(fid):
data[:, i] = line.split()
# let's make sure everything is ordered correctly
vertices = np.array(data[0], dtype=np.int32)
pos = 1e-3 * data[1:4].T
values = data[4]
order = np.argsort(vertices)
vertices = vertices[order]
pos = pos[order]
values = values[order]
label = Label(vertices, pos, values, hemi, comment, name, filename,
subject, color)
return label
@verbose
def write_label(filename, label, verbose=None):
"""Write a FreeSurfer label.
Parameters
----------
filename : string
Path to label file to produce.
label : Label
The label object to save.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Notes
-----
Note that due to file specification limitations, the Label's subject and
color attributes are not saved to disk.
See Also
--------
write_labels_to_annot
"""
hemi = label.hemi
path_head, name = op.split(filename)
if name.endswith('.label'):
name = name[:-6]
if not (name.startswith(hemi) or name.endswith(hemi)):
name += '-' + hemi
filename = op.join(path_head, name) + '.label'
logger.info('Saving label to : %s' % filename)
with open(filename, 'wb') as fid:
n_vertices = len(label.vertices)
data = np.zeros((n_vertices, 5), dtype=np.float)
data[:, 0] = label.vertices
data[:, 1:4] = 1e3 * label.pos
data[:, 4] = label.values
fid.write(b("#%s\n" % label.comment))
fid.write(b("%d\n" % n_vertices))
for d in data:
fid.write(b("%d %f %f %f %f\n" % tuple(d)))
return label
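# Illustrative usage sketch (comment only, not part of the library): a simple
# read/write round trip with the two functions above. Paths are hypothetical.
#
#   >>> label = read_label('lh.BA1.label', subject='fsaverage')
#   >>> label.values[:] = 1.0                    # e.g. prepare for morphing
#   >>> write_label('/tmp/BA1-modified', label)  # '-lh.label' is appended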
def _prep_label_split(label, subject=None, subjects_dir=None):
"""Get label and subject information prior to label splitting."""
# If necessary, find the label
if isinstance(label, BiHemiLabel):
raise TypeError("Can only split labels restricted to one hemisphere.")
elif isinstance(label, string_types):
label = read_label(label)
# Find the subject
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if label.subject is None and subject is None:
raise ValueError("The subject needs to be specified.")
elif subject is None:
subject = label.subject
elif label.subject is None:
pass
elif subject != label.subject:
raise ValueError("The label specifies a different subject (%r) from "
"the subject parameter (%r)."
% label.subject, subject)
return label, subject, subjects_dir
def _split_label_contig(label_to_split, subject=None, subjects_dir=None):
"""Split label into contiguous regions (i.e., connected components).
Parameters
----------
label_to_split : Label | str
Label which is to be split (Label object or path to a label file).
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
subjects_dir : None | str
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
labels : list of Label
        The contiguous labels, in order of descending size.
"""
# Convert to correct input if necessary
label_to_split, subject, subjects_dir = _prep_label_split(label_to_split,
subject,
subjects_dir)
# Find the spherical surface to get vertices and tris
surf_fname = '.'.join((label_to_split.hemi, 'sphere'))
surf_path = op.join(subjects_dir, subject, 'surf', surf_fname)
surface_points, surface_tris = read_surface(surf_path)
# Get vertices we want to keep and compute mesh edges
verts_arr = label_to_split.vertices
edges_all = mesh_edges(surface_tris)
# Subselect rows and cols of vertices that belong to the label
select_edges = edges_all[verts_arr][:, verts_arr].tocoo()
# Compute connected components and store as lists of vertex numbers
comp_labels = _get_components(verts_arr, select_edges)
# Convert to indices in the original surface space
label_divs = []
for comp in comp_labels:
label_divs.append(verts_arr[comp])
# Construct label division names
n_parts = len(label_divs)
if label_to_split.name.endswith(('lh', 'rh')):
basename = label_to_split.name[:-3]
name_ext = label_to_split.name[-3:]
else:
basename = label_to_split.name
name_ext = ''
name_pattern = "%s_div%%i%s" % (basename, name_ext)
names = tuple(name_pattern % i for i in range(1, n_parts + 1))
# Colors
if label_to_split.color is None:
colors = (None,) * n_parts
else:
colors = _split_colors(label_to_split.color, n_parts)
# Sort label divisions by their size (in vertices)
label_divs.sort(key=lambda x: len(x), reverse=True)
labels = []
for div, name, color in zip(label_divs, names, colors):
# Get indices of dipoles within this division of the label
verts = np.array(sorted(list(div)), int)
vert_indices = np.in1d(verts_arr, verts, assume_unique=True)
# Set label attributes
pos = label_to_split.pos[vert_indices]
values = label_to_split.values[vert_indices]
hemi = label_to_split.hemi
comment = label_to_split.comment
lbl = Label(verts, pos, values, hemi, comment, name, None, subject,
color)
labels.append(lbl)
return labels
def split_label(label, parts=2, subject=None, subjects_dir=None,
freesurfer=False):
"""Split a Label into two or more parts.
Parameters
----------
label : Label | str
Label which is to be split (Label object or path to a label file).
parts : int >= 2 | tuple of str
A sequence of strings specifying label names for the new labels (from
posterior to anterior), or the number of new labels to create (default
is 2). If a number is specified, names of the new labels will be the
input label's name with div1, div2 etc. appended.
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
subjects_dir : None | str
Path to SUBJECTS_DIR if it is not set in the environment.
freesurfer : bool
By default (``False``) ``split_label`` uses an algorithm that is
slightly optimized for performance and numerical precision. Set
``freesurfer`` to ``True`` in order to replicate label splits from
FreeSurfer's ``mris_divide_parcellation``.
Returns
-------
labels : list of Label (len = n_parts)
The labels, starting from the lowest to the highest end of the
projection axis.
Notes
-----
Works by finding the label's principal eigen-axis on the spherical surface,
projecting all label vertex coordinates onto this axis and dividing them at
regular spatial intervals.
"""
label, subject, subjects_dir = _prep_label_split(label, subject,
subjects_dir)
# find the parts
if np.isscalar(parts):
n_parts = int(parts)
if label.name.endswith(('lh', 'rh')):
basename = label.name[:-3]
name_ext = label.name[-3:]
else:
basename = label.name
name_ext = ''
name_pattern = "%s_div%%i%s" % (basename, name_ext)
names = tuple(name_pattern % i for i in range(1, n_parts + 1))
else:
names = parts
n_parts = len(names)
if n_parts < 2:
raise ValueError("Can't split label into %i parts" % n_parts)
# find the spherical surface
surf_fname = '.'.join((label.hemi, 'sphere'))
surf_path = op.join(subjects_dir, subject, "surf", surf_fname)
surface_points, surface_tris = read_surface(surf_path)
# find the label coordinates on the surface
points = surface_points[label.vertices]
center = np.mean(points, axis=0)
centered_points = points - center
# find the label's normal
if freesurfer:
# find the Freesurfer vertex closest to the center
distance = np.sqrt(np.sum(centered_points ** 2, axis=1))
i_closest = np.argmin(distance)
closest_vertex = label.vertices[i_closest]
# find the normal according to freesurfer convention
idx = np.any(surface_tris == closest_vertex, axis=1)
tris_for_normal = surface_tris[idx]
r1 = surface_points[tris_for_normal[:, 0], :]
r2 = surface_points[tris_for_normal[:, 1], :]
r3 = surface_points[tris_for_normal[:, 2], :]
tri_normals = fast_cross_3d((r2 - r1), (r3 - r1))
normal = np.mean(tri_normals, axis=0)
normal /= linalg.norm(normal)
else:
# Normal of the center
normal = center / linalg.norm(center)
# project all vertex coordinates on the tangential plane for this point
q, _ = linalg.qr(normal[:, np.newaxis])
tangent_u = q[:, 1:]
m_obs = np.dot(centered_points, tangent_u)
# find principal eigendirection
m_cov = np.dot(m_obs.T, m_obs)
w, vr = linalg.eig(m_cov)
i = np.argmax(w)
eigendir = vr[:, i]
# project back into 3d space
axis = np.dot(tangent_u, eigendir)
# orient them from posterior to anterior
if axis[1] < 0:
axis *= -1
# project the label on the axis
proj = np.dot(points, axis)
# assign mark (new label index)
proj -= proj.min()
proj /= (proj.max() / n_parts)
mark = proj // 1
mark[mark == n_parts] = n_parts - 1
# colors
if label.color is None:
colors = (None,) * n_parts
else:
colors = _split_colors(label.color, n_parts)
# construct new labels
labels = []
for i, name, color in zip(range(n_parts), names, colors):
idx = (mark == i)
vert = label.vertices[idx]
pos = label.pos[idx]
values = label.values[idx]
hemi = label.hemi
comment = label.comment
lbl = Label(vert, pos, values, hemi, comment, name, None, subject,
color)
labels.append(lbl)
return labels
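# Illustrative usage sketch (comment only, not part of the library): splitting
# a label along its principal axis. ``my_label`` is a hypothetical Label named
# 'foo-lh'; the resulting names would be 'foo_div1-lh', 'foo_div2-lh', ...
#
#   >>> parts = split_label(my_label, parts=3, subject='fsaverage')
#   >>> [p.name for p in parts]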
def label_sign_flip(label, src):
"""Compute sign for label averaging.
Parameters
----------
label : Label | BiHemiLabel
A label.
src : SourceSpaces
The source space over which the label is defined.
Returns
-------
flip : array
Sign flip vector (contains 1 or -1)
"""
if len(src) != 2:
        raise ValueError('Only source spaces with 2 hemispheres are accepted')
lh_vertno = src[0]['vertno']
rh_vertno = src[1]['vertno']
# get source orientations
ori = list()
if label.hemi in ('lh', 'both'):
vertices = label.vertices if label.hemi == 'lh' else label.lh.vertices
vertno_sel = np.intersect1d(lh_vertno, vertices)
ori.append(src[0]['nn'][vertno_sel])
if label.hemi in ('rh', 'both'):
vertices = label.vertices if label.hemi == 'rh' else label.rh.vertices
vertno_sel = np.intersect1d(rh_vertno, vertices)
ori.append(src[1]['nn'][vertno_sel])
if len(ori) == 0:
raise Exception('Unknown hemisphere type "%s"' % (label.hemi,))
ori = np.concatenate(ori, axis=0)
if len(ori) == 0:
return np.array([], int)
_, _, Vh = linalg.svd(ori, full_matrices=False)
# The sign of Vh is ambiguous, so we should align to the max-positive
# (outward) direction
dots = np.dot(ori, Vh[0])
if np.mean(dots) < 0:
dots *= -1
# Comparing to the direction of the first right singular vector
flip = np.sign(dots)
return flip
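# Illustrative usage sketch (comment only, not part of the library): using the
# sign-flip vector to average source time courses within a label without
# cancellation. ``stc`` (a SourceEstimate restricted via its ``in_label``
# method) and ``src`` are hypothetical assumptions.
#
#   >>> flip = label_sign_flip(label, src)
#   >>> tc = np.mean(flip[:, np.newaxis] * stc.in_label(label).data, axis=0)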
@verbose
def stc_to_label(stc, src=None, smooth=True, connected=False,
subjects_dir=None, verbose=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
        If it's a string it should be the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : bool
Fill in vertices on the cortical surface that are not in the source
space based on the closest source space vertex (requires
src to be a SourceSpace).
connected : bool
        If True, a list of connected labels will be returned in each
        hemisphere. The labels are ordered in decreasing order of the
        maximum value in the stc.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
labels : list of Labels | list of list of Labels
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
        ordered in decreasing order of the maximum value in the stc.
        If no Label is available in a hemisphere, an empty list is returned.
"""
if not isinstance(smooth, bool):
raise ValueError('smooth should be True or False. Got %s.' % smooth)
src = stc.subject if src is None else src
if src is None:
raise ValueError('src cannot be None if stc.subject is None')
if isinstance(src, string_types):
subject = src
else:
subject = stc.subject
if not isinstance(stc, SourceEstimate):
raise ValueError('SourceEstimate should be surface source estimates')
if isinstance(src, string_types):
if connected:
raise ValueError('The option to return only connected labels is '
'only available if source spaces are provided.')
if smooth:
msg = ("stc_to_label with smooth=True requires src to be an "
"instance of SourceSpace")
raise ValueError(msg)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
src_conn = spatial_src_connectivity(src).tocsr()
labels = []
cnt = 0
cnt_full = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
e = mesh_edges(this_tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
if connected: # we know src *must* be a SourceSpaces now
vertno = np.where(src[hemi_idx]['inuse'])[0]
if not len(np.setdiff1d(this_vertno, vertno)) == 0:
raise RuntimeError('stc contains vertices not present '
'in source space, did you morph?')
tmp = np.zeros((len(vertno), this_data.shape[1]))
this_vertno_idx = np.searchsorted(vertno, this_vertno)
tmp[this_vertno_idx] = this_data
this_data = tmp
offset = cnt_full + len(this_data)
this_src_conn = src_conn[cnt_full:offset, cnt_full:offset].tocoo()
this_data_abs_max = np.abs(this_data).max(axis=1)
clusters, _ = _find_clusters(this_data_abs_max, 0.,
connectivity=this_src_conn)
cnt_full += len(this_data)
# Then order clusters in descending order based on maximum value
clusters_max = np.argsort([np.max(this_data_abs_max[c])
for c in clusters])[::-1]
clusters = [clusters[k] for k in clusters_max]
clusters = [vertno[c] for c in clusters]
else:
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
if not connected:
this_labels = None
else:
this_labels = []
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
if smooth:
label = label.fill(src)
this_labels.append(label)
if not connected:
this_labels = this_labels[0]
labels.append(this_labels)
return labels
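# Illustrative usage sketch (comment only, not part of the library): turning
# the active vertices of a SourceEstimate into connected labels. ``stc`` and
# ``src`` are hypothetical assumptions.
#
#   >>> labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
#   ...                                     connected=True)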
def _verts_within_dist(graph, sources, max_dist):
"""Find all vertices wihin a maximum geodesic distance from source.
Parameters
----------
graph : scipy.sparse.csr_matrix
Sparse matrix with distances between adjacent vertices.
sources : list of int
Source vertices.
max_dist : float
Maximum geodesic distance.
Returns
-------
verts : array
Vertices within max_dist.
dist : array
Distances from source vertex.
"""
dist_map = {}
verts_added_last = []
for source in sources:
dist_map[source] = 0
verts_added_last.append(source)
# add neighbors until no more neighbors within max_dist can be found
while len(verts_added_last) > 0:
verts_added = []
for i in verts_added_last:
v_dist = dist_map[i]
row = graph[i, :]
neighbor_vert = row.indices
neighbor_dist = row.data
for j, d in zip(neighbor_vert, neighbor_dist):
n_dist = v_dist + d
if j in dist_map:
if n_dist < dist_map[j]:
dist_map[j] = n_dist
else:
if n_dist <= max_dist:
dist_map[j] = n_dist
# we found a new vertex within max_dist
verts_added.append(j)
verts_added_last = verts_added
verts = np.sort(np.array(list(dist_map.keys()), int))
dist = np.array([dist_map[v] for v in verts], int)
return verts, dist
def _grow_labels(seeds, extents, hemis, names, dist, vert, subject):
"""Parallelize grow_labels."""
labels = []
for seed, extent, hemi, name in zip(seeds, extents, hemis, names):
label_verts, label_dist = _verts_within_dist(dist[hemi], seed, extent)
# create a label
if len(seed) == 1:
seed_repr = str(seed)
else:
seed_repr = ','.join(map(str, seed))
comment = 'Circular label: seed=%s, extent=%0.1fmm' % (seed_repr,
extent)
label = Label(vertices=label_verts,
pos=vert[hemi][label_verts],
values=label_dist,
hemi=hemi,
comment=comment,
name=str(name),
subject=subject)
labels.append(label)
return labels
def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
overlap=True, names=None, surface='white'):
"""Generate circular labels in source space with region growing.
This function generates a number of labels in source space by growing
regions starting from the vertices defined in "seeds". For each seed, a
label is generated containing all vertices within a maximum geodesic
distance on the white matter surface from the seed.
Note: "extents" and "hemis" can either be arrays with the same length as
seeds, which allows using a different extent and hemisphere for each
label, or integers, in which case the same extent and hemisphere is
used for each label.
Parameters
----------
subject : string
Name of the subject as in SUBJECTS_DIR.
seeds : int | list
Seed, or list of seeds. Each seed can be either a vertex number or
a list of vertex numbers.
extents : array | float
Extents (radius in mm) of the labels.
hemis : array | int
Hemispheres to use for the labels (0: left, 1: right).
subjects_dir : string
Path to SUBJECTS_DIR if not set in the environment.
n_jobs : int
Number of jobs to run in parallel. Likely only useful if tens
or hundreds of labels are being expanded simultaneously. Does not
apply with ``overlap=False``.
overlap : bool
Produce overlapping labels. If True (default), the resulting labels
can be overlapping. If False, each label will be grown one step at a
time, and occupied territory will not be invaded.
names : None | list of str
Assign names to the new labels (list needs to have the same length as
seeds).
surface : string
The surface used to grow the labels, defaults to the white surface.
Returns
-------
labels : list of Label
The labels' ``comment`` attribute contains information on the seed
vertex and extent; the ``values`` attribute contains distance from the
        seed in millimeters.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
n_jobs = check_n_jobs(n_jobs)
# make sure the inputs are arrays
if np.isscalar(seeds):
seeds = [seeds]
seeds = np.atleast_1d([np.atleast_1d(seed) for seed in seeds])
extents = np.atleast_1d(extents)
hemis = np.atleast_1d(hemis)
n_seeds = len(seeds)
if len(extents) != 1 and len(extents) != n_seeds:
raise ValueError('The extents parameter has to be of length 1 or '
'len(seeds)')
if len(hemis) != 1 and len(hemis) != n_seeds:
raise ValueError('The hemis parameter has to be of length 1 or '
'len(seeds)')
# make the arrays the same length as seeds
if len(extents) == 1:
extents = np.tile(extents, n_seeds)
if len(hemis) == 1:
hemis = np.tile(hemis, n_seeds)
hemis = np.array(['lh' if h == 0 else 'rh' for h in hemis])
# names
if names is None:
names = ["Label_%i-%s" % items for items in enumerate(hemis)]
else:
if np.isscalar(names):
names = [names]
if len(names) != n_seeds:
raise ValueError('The names parameter has to be None or have '
'length len(seeds)')
for i, hemi in enumerate(hemis):
if not names[i].endswith(hemi):
names[i] = '-'.join((names[i], hemi))
names = np.array(names)
# load the surfaces and create the distance graphs
tris, vert, dist = {}, {}, {}
for hemi in set(hemis):
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert[hemi], tris[hemi] = read_surface(surf_fname)
dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
if overlap:
# create the patches
parallel, my_grow_labels, _ = parallel_func(_grow_labels, n_jobs)
seeds = np.array_split(seeds, n_jobs)
extents = np.array_split(extents, n_jobs)
hemis = np.array_split(hemis, n_jobs)
names = np.array_split(names, n_jobs)
labels = sum(parallel(my_grow_labels(s, e, h, n, dist, vert, subject)
for s, e, h, n
in zip(seeds, extents, hemis, names)), [])
else:
# special procedure for non-overlapping labels
labels = _grow_nonoverlapping_labels(subject, seeds, extents, hemis,
vert, dist, names)
# add a unique color to each label
colors = _n_colors(len(labels))
for label, color in zip(labels, colors):
label.color = color
return labels
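# Illustrative usage sketch (comment only, not part of the library): growing
# two 10 mm labels, one per hemisphere. Seed vertex numbers are hypothetical
# and SUBJECTS_DIR is assumed to be configured.
#
#   >>> labels = grow_labels('fsaverage', seeds=[1032, 2417], extents=10.,
#   ...                      hemis=[0, 1], names=['seedA', 'seedB'])
#   >>> labels[0].name             # 'seedA-lh'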
def _grow_nonoverlapping_labels(subject, seeds_, extents_, hemis, vertices_,
graphs, names_):
"""Grow labels while ensuring that they don't overlap."""
labels = []
for hemi in set(hemis):
hemi_index = (hemis == hemi)
seeds = seeds_[hemi_index]
extents = extents_[hemi_index]
names = names_[hemi_index]
graph = graphs[hemi] # distance graph
n_vertices = len(vertices_[hemi])
n_labels = len(seeds)
# prepare parcellation
parc = np.empty(n_vertices, dtype='int32')
parc[:] = -1
# initialize active sources
sources = {} # vert -> (label, dist_from_seed)
edge = [] # queue of vertices to process
for label, seed in enumerate(seeds):
if np.any(parc[seed] >= 0):
raise ValueError("Overlapping seeds")
parc[seed] = label
for s in np.atleast_1d(seed):
sources[s] = (label, 0.)
edge.append(s)
# grow from sources
while edge:
vert_from = edge.pop(0)
label, old_dist = sources[vert_from]
# add neighbors within allowable distance
row = graph[vert_from, :]
for vert_to, dist in zip(row.indices, row.data):
new_dist = old_dist + dist
# abort if outside of extent
if new_dist > extents[label]:
continue
vert_to_label = parc[vert_to]
if vert_to_label >= 0:
_, vert_to_dist = sources[vert_to]
# abort if the vertex is occupied by a closer seed
if new_dist > vert_to_dist:
continue
elif vert_to in edge:
edge.remove(vert_to)
# assign label value
parc[vert_to] = label
sources[vert_to] = (label, new_dist)
edge.append(vert_to)
# convert parc to labels
for i in xrange(n_labels):
vertices = np.nonzero(parc == i)[0]
name = str(names[i])
label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
labels.append(label_)
return labels
def random_parcellation(subject, n_parcel, hemi, subjects_dir=None,
surface='white', random_state=None):
"""Generate random cortex parcellation by growing labels.
This function generates a number of labels which don't intersect and
cover the whole surface. Regions are growing around randomly chosen
seeds.
Parameters
----------
subject : string
Name of the subject as in SUBJECTS_DIR.
n_parcel : int
Total number of cortical parcels.
hemi : str
        Hemisphere id (i.e. 'lh', 'rh', 'both'). In the case
of 'both', both hemispheres are processed with (n_parcel // 2)
parcels per hemisphere.
subjects_dir : string
Path to SUBJECTS_DIR if not set in the environment.
surface : string
The surface used to grow the labels, defaults to the white surface.
random_state : None | int | np.random.RandomState
To specify the random generator state.
Returns
-------
labels : list of Label
Random cortex parcellation
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if hemi == 'both':
hemi = ['lh', 'rh']
hemis = np.atleast_1d(hemi)
# load the surfaces and create the distance graphs
tris, vert, dist = {}, {}, {}
for hemi in set(hemis):
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert[hemi], tris[hemi] = read_surface(surf_fname)
dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
# create the patches
labels = _cortex_parcellation(subject, n_parcel, hemis, vert, dist,
random_state)
# add a unique color to each label
colors = _n_colors(len(labels))
for label, color in zip(labels, colors):
label.color = color
return labels
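# Illustrative usage sketch (comment only, not part of the library): a random
# parcellation with 64 parcels over both hemispheres. SUBJECTS_DIR and the
# subject are assumed to be available.
#
#   >>> labels = random_parcellation('fsaverage', n_parcel=64, hemi='both')
#   >>> len(labels)                # 64 (32 per hemisphere)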
def _cortex_parcellation(subject, n_parcel, hemis, vertices_, graphs,
random_state=None):
"""Random cortex parcellation."""
labels = []
rng = check_random_state(random_state)
for hemi in set(hemis):
parcel_size = len(hemis) * len(vertices_[hemi]) // n_parcel
graph = graphs[hemi] # distance graph
n_vertices = len(vertices_[hemi])
# prepare parcellation
parc = np.full(n_vertices, -1, dtype='int32')
# initialize active sources
s = rng.choice(range(n_vertices))
label_idx = 0
edge = [s] # queue of vertices to process
parc[s] = label_idx
label_size = 1
rest = len(parc) - 1
# grow from sources
while rest:
# if there are not free neighbors, start new parcel
if not edge:
rest_idx = np.where(parc < 0)[0]
s = rng.choice(rest_idx)
edge = [s]
label_idx += 1
label_size = 1
parc[s] = label_idx
rest -= 1
vert_from = edge.pop(0)
# add neighbors within allowable distance
# row = graph[vert_from, :]
# row_indices, row_data = row.indices, row.data
sl = slice(graph.indptr[vert_from], graph.indptr[vert_from + 1])
row_indices, row_data = graph.indices[sl], graph.data[sl]
for vert_to, dist in zip(row_indices, row_data):
vert_to_label = parc[vert_to]
# abort if the vertex is already occupied
if vert_to_label >= 0:
continue
# abort if outside of extent
if label_size > parcel_size:
label_idx += 1
label_size = 1
edge = [vert_to]
parc[vert_to] = label_idx
rest -= 1
break
# assign label value
parc[vert_to] = label_idx
label_size += 1
edge.append(vert_to)
rest -= 1
# merging small labels
# label connectivity matrix
n_labels = label_idx + 1
label_sizes = np.empty(n_labels, dtype=int)
label_conn = np.zeros([n_labels, n_labels], dtype='bool')
for i in range(n_labels):
vertices = np.nonzero(parc == i)[0]
label_sizes[i] = len(vertices)
neighbor_vertices = graph[vertices, :].indices
neighbor_labels = np.unique(np.array(parc[neighbor_vertices]))
label_conn[i, neighbor_labels] = 1
np.fill_diagonal(label_conn, 0)
# merging
label_id = range(n_labels)
while n_labels > n_parcel // len(hemis):
# smallest label and its smallest neighbor
i = np.argmin(label_sizes)
neighbors = np.nonzero(label_conn[i, :])[0]
j = neighbors[np.argmin(label_sizes[neighbors])]
# merging two labels
label_conn[j, :] += label_conn[i, :]
label_conn[:, j] += label_conn[:, i]
label_conn = np.delete(label_conn, i, 0)
label_conn = np.delete(label_conn, i, 1)
label_conn[j, j] = 0
label_sizes[j] += label_sizes[i]
label_sizes = np.delete(label_sizes, i, 0)
n_labels -= 1
vertices = np.nonzero(parc == label_id[i])[0]
parc[vertices] = label_id[j]
label_id = np.delete(label_id, i, 0)
# convert parc to labels
for i in xrange(n_labels):
vertices = np.nonzero(parc == label_id[i])[0]
name = 'label_' + str(i)
label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
labels.append(label_)
return labels
def _read_annot(fname):
"""Read a Freesurfer annotation from a .annot file.
Note : Copied from PySurfer
Parameters
----------
fname : str
Path to annotation file
Returns
-------
annot : numpy array, shape=(n_verts)
Annotation id at each vertex
ctab : numpy array, shape=(n_entries, 5)
RGBA + label id colortable array
names : list of str
List of region names as stored in the annot file
"""
if not op.isfile(fname):
dir_name = op.split(fname)[0]
if not op.isdir(dir_name):
            raise IOError('Directory for annotation does not exist: %s'
                          % fname)
cands = os.listdir(dir_name)
cands = [c for c in cands if '.annot' in c]
if len(cands) == 0:
raise IOError('No such file %s, no candidate parcellations '
'found in directory' % fname)
else:
raise IOError('No such file %s, candidate parcellations in '
'that directory: %s' % (fname, ', '.join(cands)))
with open(fname, "rb") as fid:
n_verts = np.fromfile(fid, '>i4', 1)[0]
data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
annot = data[data[:, 0], 1]
ctab_exists = np.fromfile(fid, '>i4', 1)[0]
if not ctab_exists:
raise Exception('Color table not found in annotation file')
n_entries = np.fromfile(fid, '>i4', 1)[0]
if n_entries > 0:
length = np.fromfile(fid, '>i4', 1)[0]
orig_tab = np.fromfile(fid, '>c', length)
orig_tab = orig_tab[:-1]
names = list()
ctab = np.zeros((n_entries, 5), np.int)
for i in range(n_entries):
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16) +
ctab[i, 3] * (2 ** 24))
else:
ctab_version = -n_entries
if ctab_version != 2:
raise Exception('Color table version not supported')
n_entries = np.fromfile(fid, '>i4', 1)[0]
ctab = np.zeros((n_entries, 5), np.int)
length = np.fromfile(fid, '>i4', 1)[0]
np.fromfile(fid, "|S%d" % length, 1) # Orig table path
entries_to_read = np.fromfile(fid, '>i4', 1)[0]
names = list()
for i in range(entries_to_read):
np.fromfile(fid, '>i4', 1) # Structure
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16))
# convert to more common alpha value
ctab[:, 3] = 255 - ctab[:, 3]
return annot, ctab, names
def _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir):
"""Get the .annot filenames and hemispheres."""
if annot_fname is not None:
        # we use the .annot file specified by the user
hemis = [op.basename(annot_fname)[:2]]
if hemis[0] not in ['lh', 'rh']:
raise ValueError('Could not determine hemisphere from filename, '
'filename has to start with "lh" or "rh".')
annot_fname = [annot_fname]
else:
# construct .annot file names for requested subject, parc, hemi
if hemi not in ['lh', 'rh', 'both']:
raise ValueError('hemi has to be "lh", "rh", or "both"')
if hemi == 'both':
hemis = ['lh', 'rh']
else:
hemis = [hemi]
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
dst = op.join(subjects_dir, subject, 'label', '%%s.%s.annot' % parc)
annot_fname = [dst % hemi_ for hemi_ in hemis]
return annot_fname, hemis
@verbose
def read_labels_from_annot(subject, parc='aparc', hemi='both',
surf_name='white', annot_fname=None, regexp=None,
subjects_dir=None, verbose=None):
"""Read labels from a FreeSurfer annotation file.
Note: Only cortical labels will be returned.
Parameters
----------
subject : str
        The subject for which to read the parcellation.
parc : str
The parcellation to use, e.g., 'aparc' or 'aparc.a2009s'.
hemi : str
The hemisphere to read the parcellation for, can be 'lh', 'rh',
or 'both'.
surf_name : str
Surface used to obtain vertex locations, e.g., 'white', 'pial'
annot_fname : str or None
Filename of the .annot file. If not None, only this file is read
and 'parc' and 'hemi' are ignored.
regexp : str
Regular expression or substring to select particular labels from the
parcellation. E.g. 'superior' will return all labels in which this
substring is contained.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
labels : list of Label
The labels, sorted by label name (ascending).
"""
logger.info('Reading labels from parcellation...')
subjects_dir = get_subjects_dir(subjects_dir)
# get the .annot filenames and hemispheres
annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
subjects_dir)
if regexp is not None:
# allow for convenient substring match
r_ = (re.compile('.*%s.*' % regexp if regexp.replace('_', '').isalnum()
else regexp))
# now we are ready to create the labels
n_read = 0
labels = list()
for fname, hemi in zip(annot_fname, hemis):
# read annotation
annot, ctab, label_names = _read_annot(fname)
label_rgbas = ctab[:, :4]
label_ids = ctab[:, -1]
# load the vertex positions from surface
fname_surf = op.join(subjects_dir, subject, 'surf',
'%s.%s' % (hemi, surf_name))
vert_pos, _ = read_surface(fname_surf)
vert_pos /= 1e3 # the positions in labels are in meters
for label_id, label_name, label_rgba in\
zip(label_ids, label_names, label_rgbas):
vertices = np.where(annot == label_id)[0]
if len(vertices) == 0:
# label is not part of cortical surface
continue
name = label_name.decode() + '-' + hemi
if (regexp is not None) and not r_.match(name):
continue
pos = vert_pos[vertices, :]
values = np.ones(len(vertices))
label_rgba = tuple(label_rgba / 255.)
label = Label(vertices, pos, values, hemi, name=name,
subject=subject, color=label_rgba)
labels.append(label)
n_read = len(labels) - n_read
logger.info(' read %d labels from %s' % (n_read, fname))
# sort the labels by label name
labels = sorted(labels, key=lambda l: l.name)
if len(labels) == 0:
msg = 'No labels found.'
if regexp is not None:
msg += ' Maybe the regular expression %r did not match?' % regexp
raise RuntimeError(msg)
return labels
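# Illustrative usage sketch (comment only, not part of the library): reading
# the left-hemisphere 'aparc' parcellation and keeping only labels whose name
# contains 'superiortemporal'. The subject is a hypothetical assumption.
#
#   >>> labels = read_labels_from_annot('fsaverage', parc='aparc', hemi='lh',
#   ...                                 regexp='superiortemporal')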
def _write_annot(fname, annot, ctab, names):
"""Write a Freesurfer annotation to a .annot file.
Parameters
----------
fname : str
Path to annotation file
annot : numpy array, shape=(n_verts)
Annotation id at each vertex. Note: IDs must be computed from
RGBA colors, otherwise the mapping will be invalid.
ctab : numpy array, shape=(n_entries, 4)
RGBA colortable array.
names : list of str
List of region names to be stored in the annot file
"""
with open(fname, 'wb') as fid:
n_verts = len(annot)
np.array(n_verts, dtype='>i4').tofile(fid)
data = np.zeros((n_verts, 2), dtype='>i4')
data[:, 0] = np.arange(n_verts)
data[:, 1] = annot
data.ravel().tofile(fid)
# indicate that color table exists
np.array(1, dtype='>i4').tofile(fid)
# color table version 2
np.array(-2, dtype='>i4').tofile(fid)
# write color table
n_entries = len(ctab)
np.array(n_entries, dtype='>i4').tofile(fid)
# write dummy color table name
table_name = 'MNE-Python Colortable'
np.array(len(table_name), dtype='>i4').tofile(fid)
np.frombuffer(table_name.encode('ascii'), dtype=np.uint8).tofile(fid)
# number of entries to write
np.array(n_entries, dtype='>i4').tofile(fid)
# write entries
for ii, (name, color) in enumerate(zip(names, ctab)):
np.array(ii, dtype='>i4').tofile(fid)
np.array(len(name), dtype='>i4').tofile(fid)
np.frombuffer(name.encode('ascii'), dtype=np.uint8).tofile(fid)
np.array(color[:4], dtype='>i4').tofile(fid)
@verbose
def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
subjects_dir=None, annot_fname=None,
colormap='hsv', hemi='both', verbose=None):
r"""Create a FreeSurfer annotation from a list of labels.
Parameters
----------
labels : list with instances of mne.Label
The labels to create a parcellation from.
subject : str | None
        The subject for which to write the parcellation.
parc : str | None
The parcellation name to use.
overwrite : bool
Overwrite files if they already exist.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
annot_fname : str | None
Filename of the .annot file. If not None, only this file is written
and 'parc' and 'subject' are ignored.
colormap : str
Colormap to use to generate label colors for labels that do not
have a color specified.
hemi : 'both' | 'lh' | 'rh'
The hemisphere(s) for which to write \*.annot files (only applies if
annot_fname is not specified; default is 'both').
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Notes
-----
Vertices that are not covered by any of the labels are assigned to a label
named "unknown".
"""
logger.info('Writing labels to parcellation...')
subjects_dir = get_subjects_dir(subjects_dir)
# get the .annot filenames and hemispheres
annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
subjects_dir)
if not overwrite:
for fname in annot_fname:
if op.exists(fname):
raise ValueError('File %s exists. Use "overwrite=True" to '
'overwrite it' % fname)
# prepare container for data to save:
to_save = []
# keep track of issues found in the labels
duplicate_colors = []
invalid_colors = []
overlap = []
no_color = (-1, -1, -1, -1)
no_color_rgb = (-1, -1, -1)
for hemi, fname in zip(hemis, annot_fname):
hemi_labels = [label for label in labels if label.hemi == hemi]
n_hemi_labels = len(hemi_labels)
if n_hemi_labels == 0:
ctab = np.empty((0, 4), dtype=np.int32)
ctab_rgb = ctab[:, :3]
else:
hemi_labels.sort(key=lambda label: label.name)
# convert colors to 0-255 RGBA tuples
hemi_colors = [no_color if label.color is None else
tuple(int(round(255 * i)) for i in label.color)
for label in hemi_labels]
ctab = np.array(hemi_colors, dtype=np.int32)
ctab_rgb = ctab[:, :3]
# make color dict (for annot ID, only R, G and B count)
labels_by_color = defaultdict(list)
for label, color in zip(hemi_labels, ctab_rgb):
labels_by_color[tuple(color)].append(label.name)
# check label colors
for color, names in labels_by_color.items():
if color == no_color_rgb:
continue
if color == (0, 0, 0):
                    # we cannot have an all-zero color, otherwise e.g. tksurfer
                    # refuses to read the parcellation
                    warn('At least one label contains a color with "r=0, '
                         'g=0, b=0" values. Some FreeSurfer tools may fail '
                         'to read the parcellation')
if any(i > 255 for i in color):
msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
invalid_colors.append(msg)
if len(names) > 1:
msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
duplicate_colors.append(msg)
# replace None values (labels with unspecified color)
if labels_by_color[no_color_rgb]:
default_colors = _n_colors(n_hemi_labels, bytes_=True,
cmap=colormap)
# keep track of colors known to be in hemi_colors :
safe_color_i = 0
for i in xrange(n_hemi_labels):
if ctab[i, 0] == -1:
color = default_colors[i]
# make sure to add no duplicate color
while np.any(np.all(color[:3] == ctab_rgb, 1)):
color = default_colors[safe_color_i]
safe_color_i += 1
# assign the color
ctab[i] = color
# find number of vertices in surface
if subject is not None and subjects_dir is not None:
fpath = op.join(subjects_dir, subject, 'surf', '%s.white' % hemi)
points, _ = read_surface(fpath)
n_vertices = len(points)
else:
if len(hemi_labels) > 0:
max_vert = max(np.max(label.vertices) for label in hemi_labels)
n_vertices = max_vert + 1
else:
n_vertices = 1
warn('Number of vertices in the surface could not be '
'verified because the surface file could not be found; '
'specify subject and subjects_dir parameters.')
# Create annot and color table array to write
annot = np.empty(n_vertices, dtype=np.int)
annot[:] = -1
# create the annotation ids from the colors
annot_id_coding = np.array((1, 2 ** 8, 2 ** 16))
annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1))
for label, annot_id in zip(hemi_labels, annot_ids):
# make sure the label is not overwriting another label
if np.any(annot[label.vertices] != -1):
other_ids = set(annot[label.vertices])
other_ids.discard(-1)
other_indices = (annot_ids.index(i) for i in other_ids)
other_names = (hemi_labels[i].name for i in other_indices)
other_repr = ', '.join(other_names)
msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr)
overlap.append(msg)
annot[label.vertices] = annot_id
hemi_names = [label.name for label in hemi_labels]
if None in hemi_names:
msg = ("Found %i labels with no name. Writing annotation file"
"requires all labels named" % (hemi_names.count(None)))
# raise the error immediately rather than crash with an
# uninformative error later (e.g. cannot join NoneType)
raise ValueError(msg)
# Assign unlabeled vertices to an "unknown" label
unlabeled = (annot == -1)
if np.any(unlabeled):
msg = ("Assigning %i unlabeled vertices to "
"'unknown-%s'" % (unlabeled.sum(), hemi))
logger.info(msg)
# find an unused color (try shades of gray first)
for i in range(1, 257):
if not np.any(np.all((i, i, i) == ctab_rgb, 1)):
break
if i < 256:
color = (i, i, i, 0)
else:
err = ("Need one free shade of gray for 'unknown' label. "
"Please modify your label colors, or assign the "
"unlabeled vertices to another label.")
raise ValueError(err)
# find the id
annot_id = np.sum(annot_id_coding * color[:3])
# update data to write
annot[unlabeled] = annot_id
ctab = np.vstack((ctab, color))
hemi_names.append("unknown")
# convert to FreeSurfer alpha values
ctab[:, 3] = 255 - ctab[:, 3]
# remove hemi ending in names
hemi_names = [name[:-3] if name.endswith(hemi) else name
for name in hemi_names]
to_save.append((fname, annot, ctab, hemi_names))
issues = []
if duplicate_colors:
msg = ("Some labels have the same color values (all labels in one "
"hemisphere must have a unique color):")
duplicate_colors.insert(0, msg)
issues.append(os.linesep.join(duplicate_colors))
if invalid_colors:
msg = ("Some labels have invalid color values (all colors should be "
"RGBA tuples with values between 0 and 1)")
invalid_colors.insert(0, msg)
issues.append(os.linesep.join(invalid_colors))
if overlap:
msg = ("Some labels occupy vertices that are also occupied by one or "
"more other labels. Each vertex can only be occupied by a "
"single label in *.annot files.")
overlap.insert(0, msg)
issues.append(os.linesep.join(overlap))
if issues:
raise ValueError('\n\n'.join(issues))
# write it
for fname, annot, ctab, hemi_names in to_save:
logger.info(' writing %d labels to %s' % (len(hemi_names), fname))
_write_annot(fname, annot, ctab, hemi_names)
| bsd-3-clause |
cademarkegard/airflow | airflow/hooks/hive_hooks.py | 13 | 25357 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import collections
import unicodecsv as csv
import itertools
import logging
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
    It also supports ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
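    # Hedged illustration (host, schema and credentials below are made up):
    # with ``use_beeline`` enabled this typically returns something like
    #     ['beeline', '-u', 'jdbc:hive2://host:10000/default;auth=noSasl',
    #      '-n', 'login', '-p', 'password']
    # followed by whatever was configured in ``hive_cli_params``.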
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
itertools.izip(
["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()]
)
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
logging.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
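    # Hedged usage sketch (connection id, query and settings are illustrative):
    #
    #     hook = HiveCliHook(hive_cli_conn_id='hive_cli_default')
    #     out = hook.run_cli("SELECT COUNT(*) FROM some_db.some_table;",
    #                        schema='some_db',
    #                        hive_conf={'mapreduce.job.queuename': 'adhoc'})
    #
    # Entries in ``hive_conf`` are appended after ``hive_cli_params`` and
    # therefore take precedence over the connection defaults.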
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
                error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile;"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
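    # Hedged usage sketch (file, table and column names are illustrative):
    #
    #     from collections import OrderedDict
    #     hook = HiveCliHook()
    #     hook.load_file('/tmp/babynames.csv', 'airflow.static_babynames',
    #                    field_dict=OrderedDict([('state', 'STRING'),
    #                                            ('year', 'INT')]),
    #                    partition={'ds': '2015-01-01'},
    #                    recreate=True)
    #
    # An OrderedDict for ``field_dict`` keeps the generated CREATE TABLE
    # column order aligned with the columns in the file.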
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
        :param table: Name of hive table @partition belongs to
        :type table: string
        :param partition: Expression that matches the partitions to check for
            (eg `a = 'b' AND c = 'd'`)
        :type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
        :param table: Name of hive table @partition belongs to
        :type table: string
        :param partition_name: Name of the partition to check for (eg `a=b/c=d`)
        :type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
        for tables with fewer than 32767 partitions (java short max val).
        For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
        tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
t = self.get_table(table_name, db)
return True
except Exception as e:
return False
class HiveServer2Hook(BaseHook):
"""
    Wrapper around the impyla library.
    Note that the default authMechanism is PLAIN; to override it you
    can specify it in the ``extra`` of your connection in the UI, as in
    ``{"authMechanism": "GSSAPI"}``.
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
            # impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
logging.warning("Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn() as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
logging.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn() as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
eramirem/astroML | book_figures/chapter3/fig_correlations.py | 3 | 4139 | """
Correlation estimates
---------------------
Figure 3.24.
Bootstrap estimates of the distribution of Pearson's, Spearman's, and Kendall's
correlation coefficients based on 2000 resamplings of the 1000 points shown
in figure 3.23. The true values are shown by the dashed lines. It is clear
that Pearson's correlation coefficient is not robust to contamination.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
from astroML.stats.random import bivariate_normal
from astroML.decorators import pickle_results
# percent sign must be escaped if usetex=True
import matplotlib
if matplotlib.rcParams.get('text.usetex'):
pct = '\%'
else:
pct = '%'
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set parameters for the distributions
Nbootstraps = 5000
N = 1000
sigma1 = 2.0
sigma2 = 1.0
mu = (10.0, 10.0)
alpha_deg = 45.0
alpha = alpha_deg * np.pi / 180
f = 0.01
#------------------------------------------------------------
# sample the distribution
# without outliers and with outliers
np.random.seed(0)
X = bivariate_normal(mu, sigma1, sigma2, alpha, N)
X_out = X.copy()
X_out[:int(f * N)] = bivariate_normal(mu, 2, 5,
45 * np.pi / 180., int(f * N))
# true values of rho (pearson/spearman r) and tau
# tau value comes from Eq. 41 of arXiv:1011.2009
rho_true = 0.6
tau_true = 2 / np.pi * np.arcsin(rho_true)
#------------------------------------------------------------
# Create a function to compute the statistics. Since this
# takes a while, we'll use the "pickle_results" decorator
# to save the results of the computation to disk
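# (the decorator caches the return value in fig_correlations_dump.pkl and
#  reloads it on later runs with matching arguments; delete the file to
#  force a fresh bootstrap computation)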
@pickle_results('fig_correlations_dump.pkl')
def compute_results(N, Nbootstraps):
results = np.zeros((3, 2, Nbootstraps))
for k in range(Nbootstraps):
ind = np.random.randint(N, size=N)
for j, data in enumerate([X, X_out]):
x = data[ind, 0]
y = data[ind, 1]
for i, statistic in enumerate([stats.pearsonr,
stats.spearmanr,
stats.kendalltau]):
results[i, j, k] = statistic(x, y)[0]
return results
results = compute_results(N, Nbootstraps)
#------------------------------------------------------------
# Plot the results in a three-panel plot
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(bottom=0.1, top=0.95, hspace=0.25)
histargs = (dict(alpha=0.5, label='No Outliers'),
dict(alpha=0.8, label='%i%s Outliers' % (int(f * 100), pct)))
distributions = ['Pearson-r', 'Spearman-r', r'Kendall-$\tau$']
xlabels = ['r_p', 'r_s', r'\tau']
for i in range(3):
ax = fig.add_subplot(311 + i)
for j in range(2):
ax.hist(results[i, j], 40, histtype='stepfilled', fc='gray',
normed=True, **histargs[j])
if i == 0:
ax.legend(loc=2)
ylim = ax.get_ylim()
if i < 2:
ax.plot([rho_true, rho_true], ylim, '--k', lw=1)
ax.set_xlim(0.34, 0.701)
else:
ax.plot([tau_true, tau_true], ylim, '--k', lw=1)
ax.set_xlim(0.31, 0.48)
ax.set_ylim(ylim)
ax.text(0.98, 0.95, distributions[i], ha='right', va='top',
transform=ax.transAxes, bbox=dict(fc='w', ec='w'))
ax.set_xlabel('$%s$' % xlabels[i])
ax.set_ylabel('$N(%s)$' % xlabels[i])
plt.show()
| bsd-2-clause |
kkarrancsu/copula-bayesian-networks | copulamnsig.py | 1 | 24012 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#******************************************************************************
#*
#* Copyright (C) 2015 Kiran Karra <[email protected]>
#*
#* This program is free software: you can redistribute it and/or modify
#* it under the terms of the GNU General Public License as published by
#* the Free Software Foundation, either version 3 of the License, or
#* (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program. If not, see <http://www.gnu.org/licenses/>.
#******************************************************************************
import math
import numpy as np
from cvolume import cvolume
import multivariate_stats
from ecdf import probability_integral_transform
from scipy.stats import entropy
def copulamnsig(family, K, *args):
"""
Computes the copula multinomial signature as described in the paper
"Highly Efficient Learning of Mixed Copula Networks" for a specified
    copula family. Essentially, it breaks up the unit grid into K x K boxes,
    and computes the probability of a sample from that copula pdf falling in
    each grid cell. This is then aggregated into a multinomial probability
    distribution. This so-called "multinomial" signature of a copula is then
used to efficiently determine the structure of the Bayesian network, as well
as the copula which would describe the dependency between the nodes.
The grid over the unit cube is numbered as follows, for a 4 x 4 grid
___________________
| 4 | 8 | 12 | 16 |
|---|---|----|----|
| 3 | 7 | 11 | 15 |
|-----------------|
| 2 | 6 | 10 | 14 |
|-----------------|
| 1 | 5 | 9 | 13 |
|___|___|____|____|
Currently, this computes the multinomial signature for a specified copula
family of 2 dimensions. It would be nice to expand this to multiple
dimensions, and we can use the general formula for C-volume
family - the copula type, must be:
'Gaussian'
'T'
'Clayton'
'Frank'
'Gumbel'
    args - must be at least of length 2, for which the first element in args
is expected to be a string which describes the dependency value
being provided, must be one of the following:
'kendall' - means kendall's Tau is being provided
'spearman' - means spearman's rho is being provided
'native' - means that the dependency parameter of the copula family
itself is being provided directly
           the second argument must be the value of the dependency type
provided. For kendall and spearman, a scalar value is expected.
For native, if the family type is Frank, Gumbel, or Clayton, then
a scalar value is expected, which represents the dependency
parameter. If the family type is Gaussian, then a 2 x 2 numpy array
is expected, which represents the correlation matrix defining the
Gaussian copula. If the family is T, then the 2nd argument is the
2x2 numpy array representing the correlation matrix, and the 3rd
argument is the degrees of freedom
"""
coords_list = _makeCoordsList(K)
# mnsig is a list of dictionaries. The (list index+1) corresponds to the
# grid of interest in the unit cube. In the dictionary, the actual lower
# left coordinates of the box and the upper right coordinates of the box
# are stored as keys 'u1v1' and 'u2v2', and then the actual value of the
# multinomial signature in that grid is stored as 'val'
mnsig = []
for coord in coords_list:
# compute the C-volume and store
u1v1 = coord[0]
u1v2 = coord[1]
u2v1 = coord[2]
u2v2 = coord[3]
try:
val = cvolume(family, u1v1, u1v2, u2v1, u2v2, *args)
except ValueError:
val = np.array([-1]) # for compatibility we put the numpy wrapper
mnsig.append(val[0])
return mnsig
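# Hedged usage sketch (the dependency value below is illustrative):
#
#     sig = copulamnsig('Clayton', 4, 'kendall', 0.3)
#     # sig is a list of K*K = 16 cell probabilities, ordered column by
#     # column (bottom to top) as in the grid diagram above, and sum(sig)
#     # should be close to 1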
def empirical_copulamnsig(X, K):
"""
Computes an empirical copula multinomial signature based on the dataset
    provided by X. X must be a numpy array of dimensions [M x N], where M is
    the number of data points in the dataset, and N is the dimensionality of the
data
"""
M = X.shape[0]
N = X.shape[1]
# convert X to U by using the probability integral transform: F(X) = U
U = probability_integral_transform(X)
# generate the coordinates so we can then compare and see where each sample
# falls into in the unit cube
coords_list = _makeCoordsList(K)
# this will be a list of dictionaries which has all the combinations of the
# empirical binomial signature
esig = []
# for all i < j, compute pairwise bivariate multinomial signature
for dim1 in range(0,N-1):
for dim2 in range(dim1+1,N):
# to compute the pairwise bivariate multinomial signature, what
# we do is essentially grid as before, and compute a histogram
            # for each grid cell, which is our empirical estimate.
            # the grid is laid out in the exact same way as described before,
# so the index of mnsig from copulamnsig and the index of the value
# generated here will be directly comparable
# ___________________
# | 4 | 8 | 12 | 16 |
# |---|---|----|----|
# | 3 | 7 | 11 | 15 |
# |-----------------|
# | 2 | 6 | 10 | 14 |
# |-----------------|
# | 1 | 5 | 9 | 13 |
# |___|___|____|____|
tmp = {}
# RV 1 that we are comparing
tmp['rv1'] = dim1+1
# RV 2 that we are comparing
tmp['rv2'] = dim2+1
# the value for the zone -- initialize to 0
esig_vec = np.zeros(K*K)
# there is probably a more efficient way to do this than to loop
# over each value, but this is a first cut at implementing this
u = U[:,dim1]
v = U[:,dim2]
for ii in range(0,M):
# find which zone this specific (u,v) sample falls in
for jj in range(0,K*K):
u1 = coords_list[jj][0][0][0]
v1 = coords_list[jj][0][0][1]
u2 = coords_list[jj][3][0][0]
v2 = coords_list[jj][3][0][1]
if(u[ii] >= u1 and u[ii] < u2 and
v[ii] >= v1 and v[ii] < v2):
# add one to the zone that it falls into
esig_vec[jj] = (esig_vec[jj] + 1.0/M)
# process the next pair by kicking out of this loop
break
tmp['esig'] = esig_vec
esig.append(tmp)
return esig
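# Hedged usage sketch (the dataset below is synthetic and illustrative):
#
#     X = np.random.randn(1000, 2)
#     esig = empirical_copulamnsig(X, 4)
#     # esig[0]['esig'] is a length-16 empirical histogram over the same
#     # grid cells used by copulamnsig, so the two signatures are directly
#     # comparable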
def _makeCoordsList(K):
eps = np.finfo(float).eps
u = np.linspace(0+eps, 1-eps, K+1)
v = np.linspace(0+eps, 1-eps, K+1)
coords_list = []
for ii in range(0,len(u)-1):
for jj in range(0,len(v)-1):
u1 = u[ii]
u2 = u[ii+1]
v1 = v[jj]
v2 = v[jj+1]
u1v1 = np.array([[u1,v1]])
u1v2 = np.array([[u1,v2]])
u2v1 = np.array([[u2,v1]])
u2v2 = np.array([[u2,v2]])
x = []
x.append(u1v1)
x.append(u1v2)
x.append(u2v1)
x.append(u2v2)
coords_list.append(x)
return coords_list
# the master function, which computes the correct copula family to choose from
# will compare the empirical signatures to the actual signature for reference
# will do the following:
# 1.) compute the empirical kendall's tau
# 2.) load the precomputed multinomial signature for that kendall's tau
# for all the copula families
# 3.) minimize the distance metric
def optimalCopulaFamily(X, K=4, family_search=['Gaussian', 'Clayton', 'Gumbel', 'Frank']):
"""
This function, given a multivariate data set X, computes the best copula family which fits
the data, using the procedure described in the paper "Highly Efficient Learning of Mixed
Copula Networks," by Gal Elidan
X - the multivariate dataset for which we desire the copula. Must be a numpy array of
dimension [M x N], where M is the number of data points, and N is the dimensionality
of the dataset
K - the square root of the number of grid points (for now, we assume square gridding of the
unit cube)
family_search - a list of all the copula families to search. Currently, what is supported is
Gaussian, Clayton, Gumbel, and Frank. As more copula's are added, the default list will
be expanded.
"""
# compute the empirical Kendall's Tau
tau_hat = multivariate_stats.kendalls_tau(X)
# compute empirical multinomial signature
empirical_mnsig = empirical_copulamnsig(X, K)
empirical_mnsig = empirical_mnsig[0]['esig']
# replace any 0 values w/ smallest possible float value
empirical_mnsig[empirical_mnsig==0] = np.spacing(1)
# compute the multinomial signature for each of the copula families specified
# and simultaneously compute the kullback leibler divergence between the empirical
# and the computed, and store that info
distances = {}
for family in family_search:
# because the Clayton and Gumbel Copula's have restrictions for the valid values of
# Kendall's tau, we do checks here to ensure those restrictions are met, because there
# will be a certain variance associated with the tau_hat measurement
if(family.lower()=='clayton'):
            # here we add some additional optimizations as follows. We know that the Clayton copula
# captures only positive concordance. Like any estimator, tau_hat will have some variance
# associated with it. Thus, the optimization we make is as follows, if tau_hat is within
# a configurable amount less than 0, then we will set tau_hat to 0 and continue processing.
# However, if tau_hat is greater than that, we theoretically wouldn't have to test against
# the Clayton copula model, so we set the KL-divergence to be infinity to exclude
# this family from being selected
if(tau_hat<-0.05):
distances[family] = np.inf
continue
elif(tau_hat>=-0.05 and tau_hat<0):
tau_hat = 0
elif(tau_hat>=1):
tau_hat = 1 - np.spacing(1) # as close to 1 as possible in our precision
elif(family.lower()=='gumbel'):
            # here we add some additional optimizations as follows. We know that the Gumbel copula
# captures only positive concordance. Like any estimator, tau_hat will have some variance
# associated with it. Thus, the optimization we make is as follows, if tau_hat is within
# a configurable amount less than 0, then we will set tau_hat to 0 and continue processing.
# However, if tau_hat is greater than that, we theoretically wouldn't have to test against
# the Gumbel copula model, so we set the KL-divergence to be infinity to exclude
# this family from being selected
if(tau_hat<-0.05):
distances[family] = np.inf
continue
elif(tau_hat>=-0.05 and tau_hat<0):
tau_hat = 0
elif(tau_hat>=1):
tau_hat = 1 - np.spacing(1) # as close to 1 as possible in our precision
# any other copula families with restrictions can go here
        mnsig = np.asarray(copulamnsig(family, K, 'kendall', tau_hat))
# replace any 0 values w/ smallest possible float value
mnsig[mnsig==0] = np.spacing(1)
# compute KL divergence, see
# http://docs.scipy.org/doc/scipy-dev/reference/generated/scipy.stats.entropy.html
distances[family] = entropy(mnsig, empirical_mnsig)
# search for the minimum distance, that is the optimal copula family to use
minDistance = np.inf
for family, distance in distances.iteritems():
if distance<minDistance:
minDistance = distance
optimalFamily = family
depParams = invcopulastat(optimalFamily, 'kendall', tau_hat)
return (optimalFamily, depParams, tau_hat)
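# Hedged usage sketch (the dataset below is synthetic and illustrative):
#
#     X = np.random.randn(1000, 2)
#     family, dep_param, tau_hat = optimalCopulaFamily(X, K=4)
#     # family is the best-fitting name from family_search, dep_param its
#     # copula dependency parameter, and tau_hat the empirical Kendall's tau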
def testHELM(tau, M, N, familyToTest, numMCSims, copulaFamiliesToTest):
results = {}
for fam in copulaFamiliesToTest:
results[fam.lower()] = 0
for ii in range(0,numMCSims):
# generate samples of the requested copula with tau same as the
# empirical signature we calculated above
if(familyToTest.lower()=='gaussian'):
r = invcopulastat(familyToTest, 'kendall', tau)
Rho = np.empty((N,N))
for jj in range(0,N):
for kk in range(0,N):
if(jj==kk):
Rho[jj][kk] = 1
else:
Rho[jj][kk] = r
try:
U = copularnd(familyToTest, M, Rho)
except ValueError:
# copularnd will throw a ValueError if Rho is not a positive semidefinite matrix
return results # return 0, which will then be ignored by tests
else: # assume Clayton, Frank, or Gumbel
try:
alpha = invcopulastat(familyToTest, 'kendall', tau)
U = copularnd(familyToTest, M, N, alpha)
except ValueError:
continue
lst = []
for jj in range(0,N):
U_conditioned = U[:,jj]
# if there are any 1's, condition it
U_conditioned[U_conditioned==1] = 0.99
if(jj%2==0):
lst.append(norm.ppf(U_conditioned))
else:
lst.append(expon.ppf(U_conditioned))
# combine X and Y into the joint distribution w/ the copula
X = np.vstack(lst)
X = X.T
ret = optimalCopulaFamily(X, family_search=copulaFamiliesToTest)
ret_family = ret[0].lower()
# aggregate results
results[ret_family] = results[ret_family] + 1.0
# display some progress
sys.stdout.write("\rComputing " + str(familyToTest) + " Copula (DIM=%d) (tau=%f)-- %d%%" % (N,tau,ii+1))
sys.stdout.flush()
sys.stdout.write("\r")
# convert results to percentage
for fam in copulaFamiliesToTest:
results[fam.lower()] = results[fam.lower()]/float(numMCSims) * 100
return results
def plotPieChartResults(results, family, title):
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral'] # for the pie chart
    # explode the reference family's portion of the pie chart
expTup = [0,0,0,0]
expTup[results.keys().index(family.lower())] = 0.1
plt.pie(results.values(), explode=expTup, labels=results.keys(),
colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
plt.title(title)
plt.show()
def testHELM_parametric(K,M,N,tauVec,families):
# some tests on the copula multinomial signature
# Monte-Carlo style simulations to test each copula generation
numMCSims = 1000
resultsAggregate = {}
for family in families:
famResults = {}
for tau in tauVec:
results = testHELM(tau, M, N, family, numMCSims, families)
famResults[tau] = results
resultsAggregate[family] = famResults
return resultsAggregate
def visualizeMNSig():
# some tests on the copula multinomial signature
K = 4
M = 1000
N = 3
tauVec = np.arange(-0.9,0.95,0.05)
# the families to test against and pick optimal copula
families = ['Gaussian', 'Clayton', 'Gumbel', 'Frank']
helmAccuracyResults = testHELM_parametric(K,M,N,tauVec,families)
resultsAggregate = {}
for family in families:
famResults = {}
for tau in tauVec:
mnsig = copulamnsig(family,K,'kendall',tau)
famResults[tau] = mnsig
resultsAggregate[family] = famResults
# visualize the results
for tau in tauVec:
# we would also like to visualize this copula on the side, to try to
# understand what may be a better way todo model selection
try:
r = invcopulastat('Gaussian', 'kendall', tau)
except ValueError:
r = -1
Rho = np.empty((N,N))
for jj in range(0,N):
for kk in range(0,N):
if(jj==kk):
Rho[jj][kk] = 1
else:
Rho[jj][kk] = r
try:
alpha_clayton = invcopulastat('Clayton', 'kendall', tau)
except ValueError:
alpha_clayton = -1
try:
alpha_gumbel = invcopulastat('Gumbel', 'kendall', tau)
except ValueError:
alpha_gumbel = -1
try:
alpha_frank = invcopulastat('Frank', 'kendall', tau)
except ValueError:
alpha_frank = -1
if(r!=-1):
try:
U_gauss = copularnd('Gaussian', M, Rho)
except ValueError:
U_gauss = np.zeros((M,N))
if(alpha_clayton!=-1):
try:
U_clayton = copularnd('Clayton', M, N, alpha_clayton)
except ValueError:
U_clayton = np.zeros((M,N))
if(alpha_frank!=-1):
try:
U_frank = copularnd('Frank', M, N, alpha_frank)
except ValueError:
U_frank = np.zeros((M,N))
if(alpha_gumbel!=-1):
try:
U_gumbel = copularnd('Gumbel', M, N, alpha_gumbel)
except ValueError:
U_gumbel = np.zeros((M,N))
# get each family's MN signature and plot it
plt.figure(figsize=(30,20))
plt.subplot(231)
if(np.sum(resultsAggregate['Gaussian'][tau])>0):
plt.plot(np.arange(1,K*K+1), resultsAggregate['Gaussian'][tau], 'b.-', label='Gaussian Copula')
if(np.sum(resultsAggregate['Clayton'][tau])>0):
plt.plot(np.arange(1,K*K+1), resultsAggregate['Clayton'][tau], 'g.-', label='Clayton Copula')
if(np.sum(resultsAggregate['Gumbel'][tau])>0):
plt.plot(np.arange(1,K*K+1), resultsAggregate['Gumbel'][tau], 'r.-', label='Gumbel Copula')
if(np.sum(resultsAggregate['Frank'][tau])>0):
plt.plot(np.arange(1,K*K+1), resultsAggregate['Frank'][tau], 'k.-', label='Frank Copula')
plt.title(r'Copula Multinomial Signature $\tau$=' + "{0:.2f}".format(tau) + ' K=' + str(K))
plt.legend()
plt.grid()
plt.subplot(232)
if(r!=-1):
plt.scatter(U_gauss[:,0], U_gauss[:,1])
plt.grid()
plt.title(r'Gaussian Copula, $\rho$=' + "{0:.2f}".format(r) + r' $\tau$=' + "{0:.2f}".format(tau))
plt.subplot(233)
if(alpha_clayton!=-1):
plt.scatter(U_clayton[:,0], U_clayton[:,1])
plt.grid()
plt.title(r'Clayton Copula, $\alpha$=' + "{0:.2f}".format(alpha_clayton) + r' $\tau$=' + "{0:.2f}".format(tau))
plt.subplot(235)
if(alpha_frank!=-1):
plt.scatter(U_frank[:,0], U_frank[:,1])
plt.grid()
plt.title(r'Frank Copula, $\alpha$=' + "{0:.2f}".format(alpha_frank) + r' $\tau$=' + "{0:.2f}".format(tau))
plt.subplot(236)
if(alpha_gumbel!=-1):
plt.scatter(U_gumbel[:,0], U_gumbel[:,1])
plt.grid()
plt.title(r'Gumbel Copula, $\alpha$=' + "{0:.2f}".format(alpha_gumbel) + r' $\tau$=' + "{0:.2f}".format(tau))
plt.subplot(234)
# index manually to ensure accuracy
cla = np.array([helmAccuracyResults['Clayton'][tau]['clayton'],
helmAccuracyResults['Gaussian'][tau]['clayton'],
helmAccuracyResults['Gumbel'][tau]['clayton'],
helmAccuracyResults['Frank'][tau]['clayton']])
gau = np.array([helmAccuracyResults['Clayton'][tau]['gaussian'],
helmAccuracyResults['Gaussian'][tau]['gaussian'],
helmAccuracyResults['Gumbel'][tau]['gaussian'],
helmAccuracyResults['Frank'][tau]['gaussian']])
gum = np.array([helmAccuracyResults['Clayton'][tau]['gumbel'],
helmAccuracyResults['Gaussian'][tau]['gumbel'],
helmAccuracyResults['Gumbel'][tau]['gumbel'],
helmAccuracyResults['Frank'][tau]['gumbel']])
fra = np.array([helmAccuracyResults['Clayton'][tau]['frank'],
helmAccuracyResults['Gaussian'][tau]['frank'],
helmAccuracyResults['Gumbel'][tau]['frank'],
helmAccuracyResults['Frank'][tau]['frank']])
ind = np.arange(4)
width = 0.2
p1 = plt.bar(ind,cla,width,color='b')
p2 = plt.bar(ind,gau,width,color='g',bottom=cla)
p3 = plt.bar(ind,gum,width,color='k',bottom=cla+gau)
p4 = plt.bar(ind,fra,width,color='r',bottom=cla+gau+gum)
plt.xticks(ind+width/2.,('Clayton', 'Gaussian', 'Gumbel', 'Frank'))
plt.legend( (p1[0], p2[0], p3[0], p4[0]), ('Clayton', 'Gaussian', 'Gumbel', 'Frank') )
plt.grid()
plt.savefig(os.path.join('figures/HELM_performance/',
'HELM_DIM_' + str(N) + '_tau_' + "{0:.2f}".format(tau) + ' _K_' + str(K) + '.png'))
plt.close()
if __name__=='__main__':
from copularnd import copularnd
from invcopulastat import invcopulastat
from scipy.stats import norm
from scipy.stats import expon
import sys
import matplotlib.pyplot as plt
import os
# some tests on the copula multinomial signature
tau = 0.4
K = 4
mnsig = copulamnsig('Gumbel',K,'kendall',tau)
# iterate through mnsig to make sure we add upto 1 as a simple sanity check
val_total = 0
for ii in range(0,len(mnsig)):
val_total = val_total + mnsig[ii] #['val']
if(np.isclose(val_total, 1.0)):
print 'CopulaMNSig total probability check passed!'
else:
print 'CopulaMNSig total probability check failed!'
M = 1000
N = 2
# Monte-Carlo style simulations to test each copula generation
numMCSims = 100
# the families to test against and pick optimal copula
families = ['Gaussian', 'Clayton', 'Gumbel', 'Frank']
"""
for family in families:
title = 'Reference Bivariate ' + str(family) + ' Copula - HELM Identification Breakdown'
results = testHELM(tau, M, N, family, numMCSims, families)
plotPieChartResults(results, family, title)
N = 3
for family in families:
title = 'Reference Bivariate ' + str(family) + ' Copula - HELM Identification Breakdown'
results = testHELM(tau, M, N, family, numMCSims, families)
plotPieChartResults(results, family, title)
"""
#tauVec = np.arange(-0.9,0.95,0.05)
#resultsAggregate = testHELM_parametric(K,M,N,tauVec)
visualizeMNSig()
| gpl-3.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/_cm.py | 15 | 94005 | """
Nothing here but dictionaries for generating LinearSegmentedColormaps,
and a dictionary of these dictionaries.
Documentation for each is in pyplot.colormaps()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
_binary_data = {
'red': ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue': ((0., 1., 1.), (1., 0., 0.))
}
_autumn_data = {'red': ((0., 1.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.), (1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),
(0.746032, 0.652778, 0.652778),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),
(0.365079, 0.444444, 0.444444),
(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),
(0.809524, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),
(1.0, 0.4975, 0.4975))}
_flag_data = {
'red': lambda x: 0.75 * np.sin((x * 31.5 + 0.25) * np.pi) + 0.5,
'green': lambda x: np.sin(x * 31.5 * np.pi),
'blue': lambda x: 0.75 * np.sin((x * 31.5 - 0.25) * np.pi) + 0.5,
}
_prism_data = {
'red': lambda x: 0.75 * np.sin((x * 20.9 + 0.25) * np.pi) + 0.67,
'green': lambda x: 0.75 * np.sin((x * 20.9 - 0.25) * np.pi) + 0.33,
'blue': lambda x: -1.1 * np.sin((x * 20.9) * np.pi),
}
def cubehelix(gamma=1.0, s=0.5, r=-1.5, h=1.0):
"""Return custom data dictionary of (r,g,b) conversion functions, which
can be used with :func:`register_cmap`, for the cubehelix color scheme.
Unlike most other color schemes cubehelix was designed by D.A. Green to
be monotonically increasing in terms of perceived brightness.
Also, when printed on a black and white postscript printer, the scheme
results in a greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b values produced
can be visualised as a squashed helix around the diagonal in the
r,g,b color cube.
For a unit color cube (i.e. 3-D coordinates for r,g,b each in the
range 0 to 1) the color scheme starts at (r,g,b) = (0,0,0), i.e. black,
and finishes at (r,g,b) = (1,1,1), i.e. white. For some fraction *x*,
between 0 and 1, the color is the corresponding grey value at that
fraction along the black to white diagonal (x,x,x) plus a color
element. This color element is calculated in a plane of constant
perceived intensity and controlled by the following parameters.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
gamma gamma factor to emphasise either low intensity values
(gamma < 1), or high intensity values (gamma > 1);
defaults to 1.0.
s the start color; defaults to 0.5 (i.e. purple).
r the number of r,g,b rotations in color that are made
from the start to the end of the color scheme; defaults
to -1.5 (i.e. -> B -> G -> R -> B).
h the hue parameter which controls how saturated the
colors are. If this parameter is zero then the color
scheme is purely a greyscale; defaults to 1.0.
========= =======================================================
"""
def get_color_function(p0, p1):
def color(x):
# Apply gamma factor to emphasise low or high intensity values
xg = x ** gamma
# Calculate amplitude and angle of deviation from the black
# to white diagonal in the plane of constant
# perceived intensity.
a = h * xg * (1 - xg) / 2
phi = 2 * np.pi * (s / 3 + r * x)
return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
return color
return {
'red': get_color_function(-0.14861, 1.78277),
'green': get_color_function(-0.29227, -0.90649),
'blue': get_color_function(1.97294, 0.0),
}
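# Hedged usage sketch (the colormap name below is illustrative):
#
#     from matplotlib.colors import LinearSegmentedColormap
#     from matplotlib.cm import register_cmap
#     register_cmap(cmap=LinearSegmentedColormap(
#         'cubehelix_alt', cubehelix(gamma=0.8, s=0.3, r=-1.0, h=1.2)))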
_cubehelix_data = cubehelix()
_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))
_brg_data = ((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
# Gnuplot palette functions
gfunc = {
0: lambda x: 0,
1: lambda x: 0.5,
2: lambda x: 1,
3: lambda x: x,
4: lambda x: x ** 2,
5: lambda x: x ** 3,
6: lambda x: x ** 4,
7: lambda x: np.sqrt(x),
8: lambda x: np.sqrt(np.sqrt(x)),
9: lambda x: np.sin(x * np.pi / 2),
10: lambda x: np.cos(x * np.pi / 2),
11: lambda x: np.abs(x - 0.5),
12: lambda x: (2 * x - 1) ** 2,
13: lambda x: np.sin(x * np.pi),
14: lambda x: np.abs(np.cos(x * np.pi)),
15: lambda x: np.sin(x * 2 * np.pi),
16: lambda x: np.cos(x * 2 * np.pi),
17: lambda x: np.abs(np.sin(x * 2 * np.pi)),
18: lambda x: np.abs(np.cos(x * 2 * np.pi)),
19: lambda x: np.abs(np.sin(x * 4 * np.pi)),
20: lambda x: np.abs(np.cos(x * 4 * np.pi)),
21: lambda x: 3 * x,
22: lambda x: 3 * x - 1,
23: lambda x: 3 * x - 2,
24: lambda x: np.abs(3 * x - 1),
25: lambda x: np.abs(3 * x - 2),
26: lambda x: (3 * x - 1) / 2,
27: lambda x: (3 * x - 2) / 2,
28: lambda x: np.abs((3 * x - 1) / 2),
29: lambda x: np.abs((3 * x - 2) / 2),
30: lambda x: x / 0.32 - 0.78125,
31: lambda x: 2 * x - 0.84,
32: lambda x: gfunc32(x),
33: lambda x: np.abs(2 * x - 0.5),
34: lambda x: 2 * x,
35: lambda x: 2 * x - 0.5,
36: lambda x: 2 * x - 1.
}
def gfunc32(x):
ret = np.zeros(len(x))
m = (x < 0.25)
ret[m] = 4 * x[m]
m = (x >= 0.25) & (x < 0.92)
ret[m] = -2 * x[m] + 1.84
m = (x >= 0.92)
ret[m] = x[m] / 0.08 - 11.5
return ret
_gnuplot_data = {
'red': gfunc[7],
'green': gfunc[5],
'blue': gfunc[15],
}
_gnuplot2_data = {
'red': gfunc[30],
'green': gfunc[31],
'blue': gfunc[32],
}
_ocean_data = {
'red': gfunc[23],
'green': gfunc[28],
'blue': gfunc[3],
}
_afmhot_data = {
'red': gfunc[34],
'green': gfunc[35],
'blue': gfunc[36],
}
_rainbow_data = {
'red': gfunc[33],
'green': gfunc[13],
'blue': gfunc[10],
}
_seismic_data = (
(0.0, 0.0, 0.3), (0.0, 0.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 0.0, 0.0),
(0.5, 0.0, 0.0))
_terrain_data = (
(0.00, (0.2, 0.2, 0.6)),
(0.15, (0.0, 0.6, 1.0)),
(0.25, (0.0, 0.8, 0.4)),
(0.50, (1.0, 1.0, 0.6)),
(0.75, (0.5, 0.36, 0.33)),
(1.00, (1.0, 1.0, 1.0)))
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),
(0.365079, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),
(0.746032, 0.000000, 0.000000),
(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),
(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),
(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),
(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),
(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),
(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),
(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),
(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1),
(0.91, 0, 0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1),
(0.65, 0, 0), (1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178), (0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),
(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),
(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),
(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),
(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),
(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),
(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),
(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),
(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),
(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),
(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),
(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),
(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),
(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),
(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),
(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),
(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),
(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),
(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),
(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),
(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),
(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),
(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),
(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),
(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),
(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),
(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),
(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),
(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),
(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),
(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),
(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),
(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),
(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),
(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),
(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),
(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),
(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),
(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),
(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),
(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),
(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),
(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),
(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),
(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),
(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),
(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),
(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),
(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),
(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),
(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),
(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),
(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),
(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),
(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),
(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),
(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),
(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),
(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),
(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),
(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.), (0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),
(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),
(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),
(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),
(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),
(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),
(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),
(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),
(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),
(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),
(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),
(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),
(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),
(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),
(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),
(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),
(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),
(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),
(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),
(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),
(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),
(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),
(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),
(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),
(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),
(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),
(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),
(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),
(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),
(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),
(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167), (1.0, 1.0, 1.0))}
_spring_data = {'red': ((0., 1., 1.), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.), (1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5), (1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4), (1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.), (1.0, 0.0, 0.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.), (1.0, 0.5, 0.5))}
_nipy_spectral_data = {
'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
}
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
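# --- Illustrative sketch (not part of the original module) -----------------
# The ColorBrewer *_data dictionaries that follow (like most palettes above)
# use Matplotlib's "segmentdata" format: for each channel, a sequence of
# (x, y0, y1) triples with x increasing from 0 to 1.  A minimal sketch of
# turning one of them (e.g. _Accent_data, defined just below) into a usable
# colormap; the helper name here is an assumption made for demonstration only.
def _demo_build_colormap(name, segmentdata, lut_size=256):
    """Build a LinearSegmentedColormap from one of the *_data dicts above."""
    from matplotlib.colors import LinearSegmentedColormap
    cmap = LinearSegmentedColormap(name, segmentdata, N=lut_size)
    # Sampling the colormap at points in [0, 1] returns RGBA tuples.
    return [cmap(x) for x in (0.0, 0.5, 1.0)]
# ----------------------------------------------------------------------------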
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612,
0.15294118225574493, 0.15294118225574493),
(0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896,
0.3803921639919281, 0.3803921639919281),
(0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438,
0.74901962280273438), (0.60000002384185791,
0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994,
0.91372549533843994), (0.80000001192092896,
0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435,
0.70588237047195435), (1.0, 0.58431375026702881,
0.58431375026702881)], 'green': [(0.0, 0.0, 0.0),
(0.10000000149011612, 0.18823529779911041,
0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541),
(0.30000001192092896, 0.68235296010971069,
0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0,
1.0), (0.60000002384185791, 0.9529411792755127,
0.9529411792755127), (0.69999998807907104,
0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342,
0.67843139171600342), (0.89999997615814209,
0.45882353186607361, 0.45882353186607361), (1.0,
0.21176470816135406, 0.21176470816135406)], 'red':
[(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898,
0.84313726425170898), (0.20000000298023224,
0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545,
0.99215686321258545), (0.40000000596046448,
0.99607843160629272, 0.99607843160629272), (0.5, 1.0,
1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104,
0.67058825492858887, 0.67058825492858887),
(0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209,
0.27058824896812439, 0.27058824896812439), (1.0,
0.19215686619281769, 0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
#
# Most palette functions have been reduced to simple function descriptions
# by Reinier Heeres, since the rgb components were mostly straight lines.
# gist_earth_data and gist_ncar_data were simplified by a script and some
# manual effort.
_gist_earth_data = \
{'red': (
(0.0, 0.0, 0.0000),
(0.2824, 0.1882, 0.1882),
(0.4588, 0.2714, 0.2714),
(0.5490, 0.4719, 0.4719),
(0.6980, 0.7176, 0.7176),
(0.7882, 0.7553, 0.7553),
(1.0000, 0.9922, 0.9922),
), 'green': (
(0.0, 0.0, 0.0000),
(0.0275, 0.0000, 0.0000),
(0.1098, 0.1893, 0.1893),
(0.1647, 0.3035, 0.3035),
(0.2078, 0.3841, 0.3841),
(0.2824, 0.5020, 0.5020),
(0.5216, 0.6397, 0.6397),
(0.6980, 0.7171, 0.7171),
(0.7882, 0.6392, 0.6392),
(0.7922, 0.6413, 0.6413),
(0.8000, 0.6447, 0.6447),
(0.8078, 0.6481, 0.6481),
(0.8157, 0.6549, 0.6549),
(0.8667, 0.6991, 0.6991),
(0.8745, 0.7103, 0.7103),
(0.8824, 0.7216, 0.7216),
(0.8902, 0.7323, 0.7323),
(0.8980, 0.7430, 0.7430),
(0.9412, 0.8275, 0.8275),
(0.9569, 0.8635, 0.8635),
(0.9647, 0.8816, 0.8816),
(0.9961, 0.9733, 0.9733),
(1.0000, 0.9843, 0.9843),
), 'blue': (
(0.0, 0.0, 0.0000),
(0.0039, 0.1684, 0.1684),
(0.0078, 0.2212, 0.2212),
(0.0275, 0.4329, 0.4329),
(0.0314, 0.4549, 0.4549),
(0.2824, 0.5004, 0.5004),
(0.4667, 0.2748, 0.2748),
(0.5451, 0.3205, 0.3205),
(0.7843, 0.3961, 0.3961),
(0.8941, 0.6651, 0.6651),
(1.0000, 0.9843, 0.9843),
)}
_gist_gray_data = {
'red': gfunc[3],
'green': gfunc[3],
'blue': gfunc[3],
}
_gist_heat_data = {
'red': lambda x: 1.5 * x,
'green': lambda x: 2 * x - 1,
'blue': lambda x: 4 * x - 3,
}
_gist_ncar_data = \
{'red': (
(0.0, 0.0, 0.0000),
(0.3098, 0.0000, 0.0000),
(0.3725, 0.3993, 0.3993),
(0.4235, 0.5003, 0.5003),
(0.5333, 1.0000, 1.0000),
(0.7922, 1.0000, 1.0000),
(0.8471, 0.6218, 0.6218),
(0.8980, 0.9235, 0.9235),
(1.0000, 0.9961, 0.9961),
), 'green': (
(0.0, 0.0, 0.0000),
(0.0510, 0.3722, 0.3722),
(0.1059, 0.0000, 0.0000),
(0.1569, 0.7202, 0.7202),
(0.1608, 0.7537, 0.7537),
(0.1647, 0.7752, 0.7752),
(0.2157, 1.0000, 1.0000),
(0.2588, 0.9804, 0.9804),
(0.2706, 0.9804, 0.9804),
(0.3176, 1.0000, 1.0000),
(0.3686, 0.8081, 0.8081),
(0.4275, 1.0000, 1.0000),
(0.5216, 1.0000, 1.0000),
(0.6314, 0.7292, 0.7292),
(0.6863, 0.2796, 0.2796),
(0.7451, 0.0000, 0.0000),
(0.7922, 0.0000, 0.0000),
(0.8431, 0.1753, 0.1753),
(0.8980, 0.5000, 0.5000),
(1.0000, 0.9725, 0.9725),
), 'blue': (
(0.0, 0.5020, 0.5020),
(0.0510, 0.0222, 0.0222),
(0.1098, 1.0000, 1.0000),
(0.2039, 1.0000, 1.0000),
(0.2627, 0.6145, 0.6145),
(0.3216, 0.0000, 0.0000),
(0.4157, 0.0000, 0.0000),
(0.4745, 0.2342, 0.2342),
(0.5333, 0.0000, 0.0000),
(0.5804, 0.0000, 0.0000),
(0.6314, 0.0549, 0.0549),
(0.6902, 0.0000, 0.0000),
(0.7373, 0.0000, 0.0000),
(0.7922, 0.9738, 0.9738),
(0.8000, 1.0000, 1.0000),
(0.8431, 1.0000, 1.0000),
(0.8980, 0.9341, 0.9341),
(1.0000, 0.9961, 0.9961),
)}
_gist_rainbow_data = (
(0.000, (1.00, 0.00, 0.16)),
(0.030, (1.00, 0.00, 0.00)),
(0.215, (1.00, 1.00, 0.00)),
(0.400, (0.00, 1.00, 0.00)),
(0.586, (0.00, 1.00, 1.00)),
(0.770, (0.00, 0.00, 1.00)),
(0.954, (1.00, 0.00, 1.00)),
(1.000, (1.00, 0.00, 0.75))
)
_gist_stern_data = {
'red': (
(0.000, 0.000, 0.000), (0.0547, 1.000, 1.000),
(0.250, 0.027, 0.250), # (0.2500, 0.250, 0.250),
(1.000, 1.000, 1.000)),
'green': ((0, 0, 0), (1, 1, 1)),
'blue': (
(0.000, 0.000, 0.000), (0.500, 1.000, 1.000),
(0.735, 0.000, 0.000), (1.000, 1.000, 1.000))
}
_gist_yarg_data = {
'red': lambda x: 1 - x,
'green': lambda x: 1 - x,
'blue': lambda x: 1 - x,
}
# This bipolar color map was generated from CoolWarmFloat33.csv of
# "Diverging Color Maps for Scientific Visualization" by Kenneth Moreland.
# <http://www.cs.unm.edu/~kmorel/documents/ColorMaps/>
_coolwarm_data = {
'red': [
(0.0, 0.2298057, 0.2298057),
(0.03125, 0.26623388, 0.26623388),
(0.0625, 0.30386891, 0.30386891),
(0.09375, 0.342804478, 0.342804478),
(0.125, 0.38301334, 0.38301334),
(0.15625, 0.424369608, 0.424369608),
(0.1875, 0.46666708, 0.46666708),
(0.21875, 0.509635204, 0.509635204),
(0.25, 0.552953156, 0.552953156),
(0.28125, 0.596262162, 0.596262162),
(0.3125, 0.639176211, 0.639176211),
(0.34375, 0.681291281, 0.681291281),
(0.375, 0.722193294, 0.722193294),
(0.40625, 0.761464949, 0.761464949),
(0.4375, 0.798691636, 0.798691636),
(0.46875, 0.833466556, 0.833466556),
(0.5, 0.865395197, 0.865395197),
(0.53125, 0.897787179, 0.897787179),
(0.5625, 0.924127593, 0.924127593),
(0.59375, 0.944468518, 0.944468518),
(0.625, 0.958852946, 0.958852946),
(0.65625, 0.96732803, 0.96732803),
(0.6875, 0.969954137, 0.969954137),
(0.71875, 0.966811177, 0.966811177),
(0.75, 0.958003065, 0.958003065),
(0.78125, 0.943660866, 0.943660866),
(0.8125, 0.923944917, 0.923944917),
(0.84375, 0.89904617, 0.89904617),
(0.875, 0.869186849, 0.869186849),
(0.90625, 0.834620542, 0.834620542),
(0.9375, 0.795631745, 0.795631745),
(0.96875, 0.752534934, 0.752534934),
(1.0, 0.705673158, 0.705673158)],
'green': [
(0.0, 0.298717966, 0.298717966),
(0.03125, 0.353094838, 0.353094838),
(0.0625, 0.406535296, 0.406535296),
(0.09375, 0.458757618, 0.458757618),
(0.125, 0.50941904, 0.50941904),
(0.15625, 0.558148092, 0.558148092),
(0.1875, 0.604562568, 0.604562568),
(0.21875, 0.648280772, 0.648280772),
(0.25, 0.688929332, 0.688929332),
(0.28125, 0.726149107, 0.726149107),
(0.3125, 0.759599947, 0.759599947),
(0.34375, 0.788964712, 0.788964712),
(0.375, 0.813952739, 0.813952739),
(0.40625, 0.834302879, 0.834302879),
(0.4375, 0.849786142, 0.849786142),
(0.46875, 0.860207984, 0.860207984),
(0.5, 0.86541021, 0.86541021),
(0.53125, 0.848937047, 0.848937047),
(0.5625, 0.827384882, 0.827384882),
(0.59375, 0.800927443, 0.800927443),
(0.625, 0.769767752, 0.769767752),
(0.65625, 0.734132809, 0.734132809),
(0.6875, 0.694266682, 0.694266682),
(0.71875, 0.650421156, 0.650421156),
(0.75, 0.602842431, 0.602842431),
(0.78125, 0.551750968, 0.551750968),
(0.8125, 0.49730856, 0.49730856),
(0.84375, 0.439559467, 0.439559467),
(0.875, 0.378313092, 0.378313092),
(0.90625, 0.312874446, 0.312874446),
(0.9375, 0.24128379, 0.24128379),
(0.96875, 0.157246067, 0.157246067),
(1.0, 0.01555616, 0.01555616)],
'blue': [
(0.0, 0.753683153, 0.753683153),
(0.03125, 0.801466763, 0.801466763),
(0.0625, 0.84495867, 0.84495867),
(0.09375, 0.883725899, 0.883725899),
(0.125, 0.917387822, 0.917387822),
(0.15625, 0.945619588, 0.945619588),
(0.1875, 0.968154911, 0.968154911),
(0.21875, 0.98478814, 0.98478814),
(0.25, 0.995375608, 0.995375608),
(0.28125, 0.999836203, 0.999836203),
(0.3125, 0.998151185, 0.998151185),
(0.34375, 0.990363227, 0.990363227),
(0.375, 0.976574709, 0.976574709),
(0.40625, 0.956945269, 0.956945269),
(0.4375, 0.931688648, 0.931688648),
(0.46875, 0.901068838, 0.901068838),
(0.5, 0.865395561, 0.865395561),
(0.53125, 0.820880546, 0.820880546),
(0.5625, 0.774508472, 0.774508472),
(0.59375, 0.726736146, 0.726736146),
(0.625, 0.678007945, 0.678007945),
(0.65625, 0.628751763, 0.628751763),
(0.6875, 0.579375448, 0.579375448),
(0.71875, 0.530263762, 0.530263762),
(0.75, 0.481775914, 0.481775914),
(0.78125, 0.434243684, 0.434243684),
(0.8125, 0.387970225, 0.387970225),
(0.84375, 0.343229596, 0.343229596),
(0.875, 0.300267182, 0.300267182),
(0.90625, 0.259301199, 0.259301199),
(0.9375, 0.220525627, 0.220525627),
(0.96875, 0.184115123, 0.184115123),
(1.0, 0.150232812, 0.150232812)]
}
# Implementation of Carey Rappaport's CMRmap.
# See `A Color Map for Effective Black-and-White Rendering of Color-Scale
# Images' by Carey Rappaport
# http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m
_CMRmap_data = {'red': ((0.000, 0.00, 0.00),
(0.125, 0.15, 0.15),
(0.250, 0.30, 0.30),
(0.375, 0.60, 0.60),
(0.500, 1.00, 1.00),
(0.625, 0.90, 0.90),
(0.750, 0.90, 0.90),
(0.875, 0.90, 0.90),
(1.000, 1.00, 1.00)),
'green': ((0.000, 0.00, 0.00),
(0.125, 0.15, 0.15),
(0.250, 0.15, 0.15),
(0.375, 0.20, 0.20),
(0.500, 0.25, 0.25),
(0.625, 0.50, 0.50),
(0.750, 0.75, 0.75),
(0.875, 0.90, 0.90),
(1.000, 1.00, 1.00)),
'blue': ((0.000, 0.00, 0.00),
(0.125, 0.50, 0.50),
(0.250, 0.75, 0.75),
(0.375, 0.50, 0.50),
(0.500, 0.15, 0.15),
(0.625, 0.00, 0.00),
(0.750, 0.10, 0.10),
(0.875, 0.50, 0.50),
(1.000, 1.00, 1.00))}
# An MIT licensed, colorblind-friendly heatmap from Wistia:
# https://github.com/wistia/heatmap-palette
# http://wistia.com/blog/heatmaps-for-colorblindness
#
# >>> import matplotlib.colors as c
# >>> colors = ["#e4ff7a", "#ffe81a", "#ffbd00", "#ffa000", "#fc7f00"]
# >>> cm = c.LinearSegmentedColormap.from_list('wistia', colors)
# >>> _wistia_data = cm._segmentdata
# >>> del _wistia_data['alpha']
#
_wistia_data = {
'red': [(0.0, 0.8941176470588236, 0.8941176470588236),
(0.25, 1.0, 1.0),
(0.5, 1.0, 1.0),
(0.75, 1.0, 1.0),
(1.0, 0.9882352941176471, 0.9882352941176471)],
'green': [(0.0, 1.0, 1.0),
(0.25, 0.9098039215686274, 0.9098039215686274),
(0.5, 0.7411764705882353, 0.7411764705882353),
(0.75, 0.6274509803921569, 0.6274509803921569),
(1.0, 0.4980392156862745, 0.4980392156862745)],
'blue': [(0.0, 0.47843137254901963, 0.47843137254901963),
(0.25, 0.10196078431372549, 0.10196078431372549),
(0.5, 0.0, 0.0),
(0.75, 0.0, 0.0),
(1.0, 0.0, 0.0)],
}
datad = {
'afmhot': _afmhot_data,
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'bwr': _bwr_data,
'brg': _brg_data,
'CMRmap': _CMRmap_data,
'cool': _cool_data,
'copper': _copper_data,
'cubehelix': _cubehelix_data,
'flag': _flag_data,
'gnuplot': _gnuplot_data,
'gnuplot2': _gnuplot2_data,
'gray': _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet': _jet_data,
'ocean': _ocean_data,
'pink': _pink_data,
'prism': _prism_data,
'rainbow': _rainbow_data,
'seismic': _seismic_data,
'spring': _spring_data,
'summer': _summer_data,
'terrain': _terrain_data,
'winter': _winter_data,
'nipy_spectral': _nipy_spectral_data,
'spectral': _nipy_spectral_data, # alias for backward compatibility
}
datad['Accent'] = _Accent_data
datad['Blues'] = _Blues_data
datad['BrBG'] = _BrBG_data
datad['BuGn'] = _BuGn_data
datad['BuPu'] = _BuPu_data
datad['Dark2'] = _Dark2_data
datad['GnBu'] = _GnBu_data
datad['Greens'] = _Greens_data
datad['Greys'] = _Greys_data
datad['Oranges'] = _Oranges_data
datad['OrRd'] = _OrRd_data
datad['Paired'] = _Paired_data
datad['Pastel1'] = _Pastel1_data
datad['Pastel2'] = _Pastel2_data
datad['PiYG'] = _PiYG_data
datad['PRGn'] = _PRGn_data
datad['PuBu'] = _PuBu_data
datad['PuBuGn'] = _PuBuGn_data
datad['PuOr'] = _PuOr_data
datad['PuRd'] = _PuRd_data
datad['Purples'] = _Purples_data
datad['RdBu'] = _RdBu_data
datad['RdGy'] = _RdGy_data
datad['RdPu'] = _RdPu_data
datad['RdYlBu'] = _RdYlBu_data
datad['RdYlGn'] = _RdYlGn_data
datad['Reds'] = _Reds_data
datad['Set1'] = _Set1_data
datad['Set2'] = _Set2_data
datad['Set3'] = _Set3_data
datad['Spectral'] = _Spectral_data
datad['YlGn'] = _YlGn_data
datad['YlGnBu'] = _YlGnBu_data
datad['YlOrBr'] = _YlOrBr_data
datad['YlOrRd'] = _YlOrRd_data
datad['gist_earth'] = _gist_earth_data
datad['gist_gray'] = _gist_gray_data
datad['gist_heat'] = _gist_heat_data
datad['gist_ncar'] = _gist_ncar_data
datad['gist_rainbow'] = _gist_rainbow_data
datad['gist_stern'] = _gist_stern_data
datad['gist_yarg'] = _gist_yarg_data
datad['coolwarm'] = _coolwarm_data
datad['Wistia'] = _wistia_data
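# The entries collected in ``datad`` above are either segmentdata
# dictionaries or, for the gist_* palettes, dictionaries of per-channel
# functions.  A hedged sketch (not part of this module) of turning one
# segmentdata entry into a usable colormap by hand, following the same
# pattern as the Wistia example above; the name 'coolwarm_demo' is arbitrary:
#
# >>> from matplotlib.colors import LinearSegmentedColormap
# >>> demo_cmap = LinearSegmentedColormap('coolwarm_demo', datad['coolwarm'])
# >>> demo_cmap(0.5)  # RGBA tuple at the midpoint of the map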
| mit |
quantopian/pyfolio | pyfolio/timeseries.py | 2 | 37350 | #
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from collections import OrderedDict
from functools import partial
import empyrical as ep
import numpy as np
import pandas as pd
import scipy as sp
import scipy.stats as stats
from sklearn import linear_model
from .deprecate import deprecated
from .interesting_periods import PERIODS
from .txn import get_turnover
from .utils import APPROX_BDAYS_PER_MONTH, APPROX_BDAYS_PER_YEAR
from .utils import DAILY
DEPRECATION_WARNING = ("Risk functions in pyfolio.timeseries are deprecated "
"and will be removed in a future release. Please "
"install the empyrical package instead.")
def var_cov_var_normal(P, c, mu=0, sigma=1):
"""
Variance-covariance calculation of daily Value-at-Risk in a
portfolio.
Parameters
----------
P : float
Portfolio value.
c : float
Confidence level.
mu : float, optional
Mean.
sigma : float, optional
Standard deviation.
Returns
-------
float
Variance-covariance.
"""
alpha = sp.stats.norm.ppf(1 - c, mu, sigma)
return P - P * (alpha + 1)
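# Hedged usage sketch (added for illustration, not part of the original
# pyfolio API): for a hypothetical $1,000,000 portfolio with zero mean and
# 2% daily return volatility, the 95% variance-covariance VaR reduces to
# -P * norm.ppf(0.05, 0, 0.02), i.e. roughly $32,897.
def _var_cov_var_normal_example():
    """Illustrative only; the portfolio value and volatility are made up."""
    return var_cov_var_normal(1e6, 0.95, mu=0.0, sigma=0.02)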
@deprecated(msg=DEPRECATION_WARNING)
def max_drawdown(returns):
"""
Determines the maximum drawdown of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
float
Maximum drawdown.
Note
-----
See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
"""
return ep.max_drawdown(returns)
@deprecated(msg=DEPRECATION_WARNING)
def annual_return(returns, period=DAILY):
"""
Determines the mean annual growth rate of returns.
Parameters
----------
returns : pd.Series
Periodic returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
float
Annual Return as CAGR (Compounded Annual Growth Rate).
"""
return ep.annual_return(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def annual_volatility(returns, period=DAILY):
"""
Determines the annual volatility of a strategy.
Parameters
----------
returns : pd.Series
Periodic returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing volatility. Can be 'monthly' or 'weekly' or 'daily'.
- Defaults to 'daily'.
Returns
-------
float
Annual volatility.
"""
return ep.annual_volatility(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def calmar_ratio(returns, period=DAILY):
"""
Determines the Calmar ratio, or drawdown ratio, of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
float
Calmar ratio (drawdown ratio) as float. Returns np.nan if there is no
calmar ratio.
Note
-----
See https://en.wikipedia.org/wiki/Calmar_ratio for more details.
"""
return ep.calmar_ratio(returns, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def omega_ratio(returns, annual_return_threshhold=0.0):
"""
Determines the Omega ratio of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
annual_return_threshold : float, optional
Minimum acceptable return of the investor. Annual threshold over which
returns are considered positive or negative. It is converted to a
value appropriate for the period of the returns for this ratio.
E.g. An annual minimum acceptable return of 100 translates to a daily
minimum acceptable return of 0.01848.
(1 + 100) ** (1. / 252) - 1 = 0.01848
Daily returns must exceed this value to be considered positive. The
daily return yields the desired annual return when compounded over
the average number of business days in a year.
(1 + 0.01848) ** 252 - 1 = 99.93
- Defaults to 0.0
Returns
-------
float
Omega ratio.
Note
-----
See https://en.wikipedia.org/wiki/Omega_ratio for more details.
"""
return ep.omega_ratio(returns,
required_return=annual_return_threshhold)
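# Hedged helper sketch (added for illustration, not original pyfolio code):
# the annual-to-daily threshold conversion described in the docstring above,
# e.g. _annual_to_daily_threshold(100) ~= 0.01848.
def _annual_to_daily_threshold(annual_threshold, periods=APPROX_BDAYS_PER_YEAR):
    # Daily return that compounds to the given annual threshold.
    return (1 + annual_threshold) ** (1. / periods) - 1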
@deprecated(msg=DEPRECATION_WARNING)
def sortino_ratio(returns, required_return=0, period=DAILY):
"""
Determines the Sortino ratio of a strategy.
Parameters
----------
returns : pd.Series or pd.DataFrame
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
required_return: float / series
minimum acceptable return
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
depends on input type
series ==> float
DataFrame ==> np.array
Annualized Sortino ratio.
"""
return ep.sortino_ratio(returns, required_return=required_return)
@deprecated(msg=DEPRECATION_WARNING)
def downside_risk(returns, required_return=0, period=DAILY):
"""
Determines the downside deviation below a threshold
Parameters
----------
returns : pd.Series or pd.DataFrame
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
required_return: float / series
minimum acceptable return
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
depends on input type
series ==> float
DataFrame ==> np.array
Annualized downside deviation
"""
return ep.downside_risk(returns,
required_return=required_return,
period=period)
@deprecated(msg=DEPRECATION_WARNING)
def sharpe_ratio(returns, risk_free=0, period=DAILY):
"""
Determines the Sharpe ratio of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
risk_free : int, float
Constant risk-free return throughout the period.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
float
Sharpe ratio.
np.nan
If insufficient length of returns or if adjusted returns are 0.
Note
-----
See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
"""
return ep.sharpe_ratio(returns, risk_free=risk_free, period=period)
@deprecated(msg=DEPRECATION_WARNING)
def alpha_beta(returns, factor_returns):
"""
Calculates both alpha and beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
Returns
-------
float
Alpha.
float
Beta.
"""
return ep.alpha_beta(returns, factor_returns=factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def alpha(returns, factor_returns):
"""
Calculates annualized alpha.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
Returns
-------
float
Alpha.
"""
return ep.alpha(returns, factor_returns=factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def beta(returns, factor_returns):
"""
Calculates beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
Returns
-------
float
Beta.
"""
return ep.beta(returns, factor_returns)
@deprecated(msg=DEPRECATION_WARNING)
def stability_of_timeseries(returns):
"""
Determines R-squared of a linear fit to the cumulative
log returns. Computes an ordinary least squares linear fit,
and returns R-squared.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
Returns
-------
float
R-squared.
"""
return ep.stability_of_timeseries(returns)
@deprecated(msg=DEPRECATION_WARNING)
def tail_ratio(returns):
"""
Determines the ratio between the right (95%) and left tail (5%).
For example, a ratio of 0.25 means that losses are four times
as bad as profits.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
Returns
-------
float
tail ratio
"""
return ep.tail_ratio(returns)
def common_sense_ratio(returns):
"""
Common sense ratio is the multiplication of the tail ratio and the
Gain-to-Pain-Ratio -- sum(profits) / sum(losses).
See http://bit.ly/1ORzGBk for more information on motivation of
this metric.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
float
common sense ratio
"""
return ep.tail_ratio(returns) * \
(1 + ep.annual_return(returns))
def normalize(returns, starting_value=1):
"""
Normalizes a returns timeseries based on the first value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
starting_value : float, optional
The starting returns (default 1).
Returns
-------
pd.Series
Normalized returns.
"""
return starting_value * (returns / returns.iloc[0])
@deprecated(msg=DEPRECATION_WARNING)
def cum_returns(returns, starting_value=0):
"""
Compute cumulative returns from simple returns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
starting_value : float, optional
The starting returns (default 0).
Returns
-------
pandas.Series
Series of cumulative returns.
Notes
-----
For increased numerical accuracy, convert input to log returns
where it is possible to sum instead of multiplying.
"""
return ep.cum_returns(returns, starting_value=starting_value)
@deprecated(msg=DEPRECATION_WARNING)
def aggregate_returns(returns, convert_to):
"""
Aggregates returns by week, month, or year.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
convert_to : str
Can be 'weekly', 'monthly', or 'yearly'.
Returns
-------
pd.Series
Aggregated returns.
"""
return ep.aggregate_returns(returns, convert_to=convert_to)
def rolling_beta(returns, factor_returns,
rolling_window=APPROX_BDAYS_PER_MONTH * 6):
"""
Determines the rolling beta of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series or pd.DataFrame
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- If DataFrame is passed, computes rolling beta for each column.
- This is in the same style as returns.
rolling_window : int, optional
The size of the rolling window, in days, over which to compute
beta (default 6 months).
Returns
-------
pd.Series
Rolling beta.
Note
-----
See https://en.wikipedia.org/wiki/Beta_(finance) for more details.
"""
if factor_returns.ndim > 1:
# Apply column-wise
return factor_returns.apply(partial(rolling_beta, returns),
rolling_window=rolling_window)
else:
out = pd.Series(index=returns.index)
for beg, end in zip(returns.index[0:-rolling_window],
returns.index[rolling_window:]):
out.loc[end] = ep.beta(
returns.loc[beg:end],
factor_returns.loc[beg:end])
return out
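# Hedged usage sketch (added for illustration, not part of the original
# module): rolling_beta on synthetic data; the dates, window length and the
# true beta of 0.5 are arbitrary assumptions.
def _rolling_beta_example(n_days=400, seed=0):
    rng = np.random.RandomState(seed)
    idx = pd.date_range('2015-01-01', periods=n_days, freq='B')
    factor = pd.Series(rng.normal(0., 0.01, n_days), index=idx)
    strat = 0.5 * factor + pd.Series(rng.normal(0., 0.005, n_days), index=idx)
    # Once the first window is filled, the estimates should hover around 0.5.
    return rolling_beta(strat, factor, rolling_window=126)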
def rolling_regression(returns, factor_returns,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
nan_threshold=0.1):
"""
Computes rolling factor betas using a multivariate linear regression
(separate linear regressions are problematic because the factors may be
confounded).
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- Computes rolling beta for each column.
- This is in the same style as returns.
rolling_window : int, optional
The days window over which to compute the beta. Defaults to 6 months.
nan_threshold : float, optional
If there are more than this fraction of NaNs, the rolling regression
for the given date will be skipped.
Returns
-------
pandas.DataFrame
DataFrame containing rolling beta coefficients to SMB, HML and UMD
"""
# We need to drop NaNs to regress
ret_no_na = returns.dropna()
columns = ['alpha'] + factor_returns.columns.tolist()
rolling_risk = pd.DataFrame(columns=columns,
index=ret_no_na.index)
rolling_risk.index.name = 'dt'
for beg, end in zip(ret_no_na.index[:-rolling_window],
ret_no_na.index[rolling_window:]):
returns_period = ret_no_na[beg:end]
factor_returns_period = factor_returns.loc[returns_period.index]
if np.all(factor_returns_period.isnull().mean() < nan_threshold):
factor_returns_period_dnan = factor_returns_period.dropna()
reg = linear_model.LinearRegression(fit_intercept=True).fit(
factor_returns_period_dnan,
returns_period.loc[factor_returns_period_dnan.index])
rolling_risk.loc[end, factor_returns.columns] = reg.coef_
rolling_risk.loc[end, 'alpha'] = reg.intercept_
return rolling_risk
def gross_lev(positions):
"""
Calculates the gross leverage of a strategy.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
pd.Series
Gross leverage.
"""
exposure = positions.drop('cash', axis=1).abs().sum(axis=1)
return exposure / positions.sum(axis=1)
def value_at_risk(returns, period=None, sigma=2.0):
"""
Get value at risk (VaR).
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
period : str, optional
Period over which to calculate VaR. Set to 'weekly',
'monthly', or 'yearly', otherwise defaults to period of
returns (typically daily).
sigma : float, optional
Standard deviations of VaR, default 2.
"""
if period is not None:
returns_agg = ep.aggregate_returns(returns, period)
else:
returns_agg = returns.copy()
value_at_risk = returns_agg.mean() - sigma * returns_agg.std()
return value_at_risk
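# Hedged usage sketch (added for illustration, not part of the original
# module): daily and weekly value-at-risk on synthetic returns.
def _value_at_risk_example(n_days=252, seed=1):
    rng = np.random.RandomState(seed)
    idx = pd.date_range('2015-01-01', periods=n_days, freq='B')
    returns = pd.Series(rng.normal(0.0005, 0.01, n_days), index=idx)
    daily_var = value_at_risk(returns)                    # ~ mean - 2 * std
    weekly_var = value_at_risk(returns, period='weekly')  # aggregate first
    return daily_var, weekly_var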
SIMPLE_STAT_FUNCS = [
ep.annual_return,
ep.cum_returns_final,
ep.annual_volatility,
ep.sharpe_ratio,
ep.calmar_ratio,
ep.stability_of_timeseries,
ep.max_drawdown,
ep.omega_ratio,
ep.sortino_ratio,
stats.skew,
stats.kurtosis,
ep.tail_ratio,
value_at_risk
]
FACTOR_STAT_FUNCS = [
ep.alpha,
ep.beta,
]
STAT_FUNC_NAMES = {
'annual_return': 'Annual return',
'cum_returns_final': 'Cumulative returns',
'annual_volatility': 'Annual volatility',
'sharpe_ratio': 'Sharpe ratio',
'calmar_ratio': 'Calmar ratio',
'stability_of_timeseries': 'Stability',
'max_drawdown': 'Max drawdown',
'omega_ratio': 'Omega ratio',
'sortino_ratio': 'Sortino ratio',
'skew': 'Skew',
'kurtosis': 'Kurtosis',
'tail_ratio': 'Tail ratio',
'common_sense_ratio': 'Common sense ratio',
'value_at_risk': 'Daily value at risk',
'alpha': 'Alpha',
'beta': 'Beta',
}
def perf_stats(returns, factor_returns=None, positions=None,
transactions=None, turnover_denom='AGB'):
"""
Calculates various performance metrics of a strategy, for use in
plotting.show_perf_stats.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
- If None, do not compute alpha, beta, and information ratio.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
turnover_denom : str
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
Returns
-------
pd.Series
Performance metrics.
"""
stats = pd.Series()
for stat_func in SIMPLE_STAT_FUNCS:
stats[STAT_FUNC_NAMES[stat_func.__name__]] = stat_func(returns)
if positions is not None:
stats['Gross leverage'] = gross_lev(positions).mean()
if transactions is not None:
stats['Daily turnover'] = get_turnover(positions,
transactions,
turnover_denom).mean()
if factor_returns is not None:
for stat_func in FACTOR_STAT_FUNCS:
res = stat_func(returns, factor_returns)
stats[STAT_FUNC_NAMES[stat_func.__name__]] = res
return stats
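# Hedged usage sketch (added for illustration, not part of the original
# module): perf_stats on synthetic strategy and benchmark returns.  Positions
# and transactions are omitted, so the leverage and turnover rows are skipped.
def _perf_stats_example(n_days=504, seed=2):
    rng = np.random.RandomState(seed)
    idx = pd.date_range('2014-01-01', periods=n_days, freq='B')
    bench = pd.Series(rng.normal(0.0004, 0.01, n_days), index=idx)
    strat = 0.8 * bench + pd.Series(rng.normal(0.0002, 0.004, n_days), index=idx)
    return perf_stats(strat, factor_returns=bench)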
def perf_stats_bootstrap(returns, factor_returns=None, return_stats=True,
**kwargs):
"""Calculates various bootstrapped performance metrics of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
- If None, do not compute alpha, beta, and information ratio.
return_stats : boolean (optional)
If True, returns a DataFrame of mean, median, 5 and 95 percentiles
for each perf metric.
If False, returns a DataFrame with the bootstrap samples for
each perf metric.
Returns
-------
pd.DataFrame
if return_stats is True:
- Distributional statistics of bootstrapped sampling
distribution of performance metrics.
if return_stats is False:
- Bootstrap samples for each performance metric.
"""
bootstrap_values = OrderedDict()
for stat_func in SIMPLE_STAT_FUNCS:
stat_name = STAT_FUNC_NAMES[stat_func.__name__]
bootstrap_values[stat_name] = calc_bootstrap(stat_func,
returns)
if factor_returns is not None:
for stat_func in FACTOR_STAT_FUNCS:
stat_name = STAT_FUNC_NAMES[stat_func.__name__]
bootstrap_values[stat_name] = calc_bootstrap(
stat_func,
returns,
factor_returns=factor_returns)
bootstrap_values = pd.DataFrame(bootstrap_values)
if return_stats:
stats = bootstrap_values.apply(calc_distribution_stats)
return stats.T[['mean', 'median', '5%', '95%']]
else:
return bootstrap_values
def calc_bootstrap(func, returns, *args, **kwargs):
"""Performs a bootstrap analysis on a user-defined function returning
a summary statistic.
Parameters
----------
func : function
Function that either takes a single array (commonly returns)
or two arrays (commonly returns and factor returns) and
returns a single value (commonly a summary
statistic). Additional args and kwargs are passed as well.
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
n_samples : int, optional
Number of bootstrap samples to draw. Default is 1000.
Increasing this will lead to more stable / accurate estimates.
Returns
-------
numpy.ndarray
Bootstrapped sampling distribution of passed in func.
"""
n_samples = kwargs.pop('n_samples', 1000)
out = np.empty(n_samples)
factor_returns = kwargs.pop('factor_returns', None)
for i in range(n_samples):
idx = np.random.randint(len(returns), size=len(returns))
returns_i = returns.iloc[idx].reset_index(drop=True)
if factor_returns is not None:
factor_returns_i = factor_returns.iloc[idx].reset_index(drop=True)
out[i] = func(returns_i, factor_returns_i,
*args, **kwargs)
else:
out[i] = func(returns_i,
*args, **kwargs)
return out
def calc_distribution_stats(x):
"""Calculate various summary statistics of data.
Parameters
----------
x : numpy.ndarray or pandas.Series
Array to compute summary statistics for.
Returns
-------
pandas.Series
Series containing mean, median, std, as well as 5, 25, 75 and
95 percentiles of passed in values.
"""
return pd.Series({'mean': np.mean(x),
'median': np.median(x),
'std': np.std(x),
'5%': np.percentile(x, 5),
'25%': np.percentile(x, 25),
'75%': np.percentile(x, 75),
'95%': np.percentile(x, 95),
'IQR': np.subtract.reduce(
np.percentile(x, [75, 25])),
})
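# Hedged usage sketch (added for illustration, not part of the original
# module): bootstrapping the Sharpe ratio and summarising the resulting
# sampling distribution; n_samples is kept small only to keep the sketch cheap.
def _bootstrap_sharpe_example(n_days=252, n_samples=100, seed=3):
    rng = np.random.RandomState(seed)
    idx = pd.date_range('2015-01-01', periods=n_days, freq='B')
    returns = pd.Series(rng.normal(0.0005, 0.01, n_days), index=idx)
    samples = calc_bootstrap(ep.sharpe_ratio, returns, n_samples=n_samples)
    return calc_distribution_stats(samples)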
def get_max_drawdown_underwater(underwater):
"""
Determines peak, valley, and recovery dates given an 'underwater'
DataFrame.
An underwater DataFrame is a DataFrame that has precomputed
rolling drawdown.
Parameters
----------
underwater : pd.Series
Underwater returns (rolling drawdown) of a strategy.
Returns
-------
peak : datetime
The maximum drawdown's peak.
valley : datetime
The maximum drawdown's valley.
recovery : datetime
The maximum drawdown's recovery.
"""
valley = underwater.idxmin() # end of the period
# Find first 0
peak = underwater[:valley][underwater[:valley] == 0].index[-1]
# Find last 0
try:
recovery = underwater[valley:][underwater[valley:] == 0].index[0]
except IndexError:
recovery = np.nan # drawdown not recovered
return peak, valley, recovery
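# Hedged usage sketch (added for illustration, not part of the original
# module): building the 'underwater' (rolling drawdown) series that this
# helper expects, mirroring the construction used in get_max_drawdown and
# get_top_drawdowns below.
def _max_drawdown_underwater_example(n_days=252, seed=4):
    rng = np.random.RandomState(seed)
    idx = pd.date_range('2015-01-01', periods=n_days, freq='B')
    returns = pd.Series(rng.normal(0., 0.01, n_days), index=idx)
    df_cum = ep.cum_returns(returns, 1.0)
    underwater = df_cum / np.maximum.accumulate(df_cum) - 1
    return get_max_drawdown_underwater(underwater)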
def get_max_drawdown(returns):
"""
Determines the maximum drawdown of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
Returns
-------
float
Maximum drawdown.
Note
-----
See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
"""
returns = returns.copy()
df_cum = cum_returns(returns, 1.0)
running_max = np.maximum.accumulate(df_cum)
underwater = df_cum / running_max - 1
return get_max_drawdown_underwater(underwater)
def get_top_drawdowns(returns, top=10):
"""
Finds top drawdowns, sorted by drawdown amount.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
The amount of top drawdowns to find (default 10).
Returns
-------
drawdowns : list
List of drawdown peaks, valleys, and recoveries. See get_max_drawdown.
"""
returns = returns.copy()
df_cum = ep.cum_returns(returns, 1.0)
running_max = np.maximum.accumulate(df_cum)
underwater = df_cum / running_max - 1
drawdowns = []
for _ in range(top):
peak, valley, recovery = get_max_drawdown_underwater(underwater)
# Slice out draw-down period
if not pd.isnull(recovery):
underwater.drop(underwater[peak: recovery].index[1:-1],
inplace=True)
else:
# drawdown has not ended yet
underwater = underwater.loc[:peak]
drawdowns.append((peak, valley, recovery))
if ((len(returns) == 0)
or (len(underwater) == 0)
or (np.min(underwater) == 0)):
break
return drawdowns
def gen_drawdown_table(returns, top=10):
"""
Places top drawdowns in a table.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
The amount of top drawdowns to find (default 10).
Returns
-------
df_drawdowns : pd.DataFrame
Information about top drawdowns.
"""
df_cum = ep.cum_returns(returns, 1.0)
drawdown_periods = get_top_drawdowns(returns, top=top)
df_drawdowns = pd.DataFrame(index=list(range(top)),
columns=['Net drawdown in %',
'Peak date',
'Valley date',
'Recovery date',
'Duration'])
for i, (peak, valley, recovery) in enumerate(drawdown_periods):
if pd.isnull(recovery):
df_drawdowns.loc[i, 'Duration'] = np.nan
else:
df_drawdowns.loc[i, 'Duration'] = len(pd.date_range(peak,
recovery,
freq='B'))
df_drawdowns.loc[i, 'Peak date'] = (peak.to_pydatetime()
.strftime('%Y-%m-%d'))
df_drawdowns.loc[i, 'Valley date'] = (valley.to_pydatetime()
.strftime('%Y-%m-%d'))
if isinstance(recovery, float):
df_drawdowns.loc[i, 'Recovery date'] = recovery
else:
df_drawdowns.loc[i, 'Recovery date'] = (recovery.to_pydatetime()
.strftime('%Y-%m-%d'))
df_drawdowns.loc[i, 'Net drawdown in %'] = (
(df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * 100
df_drawdowns['Peak date'] = pd.to_datetime(df_drawdowns['Peak date'])
df_drawdowns['Valley date'] = pd.to_datetime(df_drawdowns['Valley date'])
df_drawdowns['Recovery date'] = pd.to_datetime(
df_drawdowns['Recovery date'])
return df_drawdowns
def rolling_volatility(returns, rolling_vol_window):
"""
Determines the rolling volatility of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_vol_window : int
Length of rolling window, in days, over which to compute.
Returns
-------
pd.Series
Rolling volatility.
"""
return returns.rolling(rolling_vol_window).std() \
* np.sqrt(APPROX_BDAYS_PER_YEAR)
def rolling_sharpe(returns, rolling_sharpe_window):
"""
Determines the rolling Sharpe ratio of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_sharpe_window : int
Length of rolling window, in days, over which to compute.
Returns
-------
pd.Series
Rolling Sharpe ratio.
Note
-----
See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
"""
return returns.rolling(rolling_sharpe_window).mean() \
/ returns.rolling(rolling_sharpe_window).std() \
* np.sqrt(APPROX_BDAYS_PER_YEAR)
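# Hedged usage sketch (added for illustration, not part of the original
# module): ~quarterly (63-day) rolling volatility and Sharpe ratio.
def _rolling_risk_example(n_days=504, seed=5):
    rng = np.random.RandomState(seed)
    idx = pd.date_range('2014-01-01', periods=n_days, freq='B')
    returns = pd.Series(rng.normal(0.0005, 0.01, n_days), index=idx)
    vol = rolling_volatility(returns, rolling_vol_window=63)
    sharpe = rolling_sharpe(returns, rolling_sharpe_window=63)
    return vol, sharpe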
def simulate_paths(is_returns, num_days,
starting_value=1, num_samples=1000, random_seed=None):
"""
Generate alternate paths using available values from in-sample returns.
Parameters
----------
is_returns : pandas.core.frame.DataFrame
Non-cumulative in-sample returns.
num_days : int
Number of days to project the probability cone forward.
starting_value : int or float
Starting value of the out of sample period.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
Returns
-------
samples : numpy.ndarray
"""
samples = np.empty((num_samples, num_days))
seed = np.random.RandomState(seed=random_seed)
for i in range(num_samples):
samples[i, :] = is_returns.sample(num_days, replace=True,
random_state=seed)
return samples
def summarize_paths(samples, cone_std=(1., 1.5, 2.), starting_value=1.):
"""
Generate the upper and lower bounds of an n standard deviation
cone of forecasted cumulative returns.
Parameters
----------
samples : numpy.ndarray
Alternative paths, or series of possible outcomes.
cone_std : list of int/float
Number of standard deviations to use in the boundaries of
the cone. If multiple values are passed, cone bounds will
be generated for each value.
Returns
-------
cone_bounds : pandas.core.frame.DataFrame
"""
cum_samples = ep.cum_returns(samples.T,
starting_value=starting_value).T
cum_mean = cum_samples.mean(axis=0)
cum_std = cum_samples.std(axis=0)
if isinstance(cone_std, (float, int)):
cone_std = [cone_std]
cone_bounds = pd.DataFrame(columns=pd.Float64Index([]))
for num_std in cone_std:
cone_bounds.loc[:, float(num_std)] = cum_mean + cum_std * num_std
cone_bounds.loc[:, float(-num_std)] = cum_mean - cum_std * num_std
return cone_bounds
def forecast_cone_bootstrap(is_returns, num_days, cone_std=(1., 1.5, 2.),
starting_value=1, num_samples=1000,
random_seed=None):
"""
Determines the upper and lower bounds of an n standard deviation
cone of forecasted cumulative returns. Future cumulative mean and
standard deviation are computed by repeatedly sampling from the
in-sample daily returns (i.e. bootstrap). This cone is non-parametric,
meaning it does not assume that returns are normally distributed.
Parameters
----------
is_returns : pd.Series
In-sample daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
num_days : int
Number of days to project the probability cone forward.
cone_std : int, float, or list of int/float
Number of standard deviations to use in the boundaries of
the cone. If multiple values are passed, cone bounds will
be generated for each value.
starting_value : int or float
Starting value of the out of sample period.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
Returns
-------
pd.DataFrame
Contains upper and lower cone boundaries. Column names are
strings corresponding to the number of standard deviations
above (positive) or below (negative) the projected mean
cumulative returns.
"""
samples = simulate_paths(
is_returns=is_returns,
num_days=num_days,
starting_value=starting_value,
num_samples=num_samples,
random_seed=random_seed
)
cone_bounds = summarize_paths(
samples=samples,
cone_std=cone_std,
starting_value=starting_value
)
return cone_bounds
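# Hedged usage sketch (added for illustration, not part of the original
# module): a ~3-month (63 business day) forward cone bootstrapped from one
# year of synthetic in-sample returns; num_samples is kept small for speed.
def _forecast_cone_example(seed=6):
    rng = np.random.RandomState(seed)
    idx = pd.date_range('2015-01-01', periods=252, freq='B')
    is_returns = pd.Series(rng.normal(0.0005, 0.01, 252), index=idx)
    return forecast_cone_bootstrap(is_returns, num_days=63,
                                   cone_std=(1., 1.5, 2.),
                                   num_samples=200, random_seed=seed)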
def extract_interesting_date_ranges(returns, periods=None):
"""
Extracts returns based on interesting events. See
gen_date_range_interesting.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
ranges : OrderedDict
Date ranges, with returns, of all valid events.
"""
if periods is None:
periods = PERIODS
returns_dupe = returns.copy()
returns_dupe.index = returns_dupe.index.map(pd.Timestamp)
ranges = OrderedDict()
for name, (start, end) in periods.items():
try:
period = returns_dupe.loc[start:end]
if len(period) == 0:
continue
ranges[name] = period
except BaseException:
continue
return ranges
| apache-2.0 |
CroatianMeteorNetwork/RMS | Utils/ShowThresholdLevels.py | 2 | 3363 | """ The script will open all FF files in the given folder and plot color-coded images where the color
represents the threshold needed to detect individual features on the image.
"""
from __future__ import print_function, division, absolute_import
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from RMS.Formats.FFfile import validFFName
from RMS.Formats.FFfile import read as readFF
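# Hedged illustration (added for clarity, not part of the original script):
# the detection rule used below flags a pixel when
#     maxpixel > avepixel + k1*stdpixel + j1,
# so the k1 level needed to pick up a given pixel is
#     (maxpixel - avepixel - j1)/stdpixel,
# which is exactly the quantity shown in the colour-coded map.
def thresholdLevelMap(maxpixel, avepixel, stdpixel, j1):
    """ Return the per-pixel k1 level implied by the maxpixel, avepixel and stdpixel arrays. """
    return (maxpixel.astype(np.float64) - avepixel.astype(np.float64) - j1)/stdpixel.astype(np.float64)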
if __name__ == "__main__":
import RMS.ConfigReader as cr
### PARSE INPUT ARGUMENTS ###
# Init the command line arguments parser
arg_parser = argparse.ArgumentParser(description=""" Show threshold levels needed to detect certain meteors.
""")
arg_parser.add_argument('dir_path', type=str, help="Path to the folder with FF files.")
arg_parser.add_argument('-f', '--fireball', action="store_true", help="""Estimate threshold for fireball
detection, not meteor detection. """)
arg_parser.add_argument('-c', '--config', nargs=1, metavar='CONFIG_PATH', type=str, \
help="Path to a config file which will be used instead of the default one.")
#############################
# Parse the command line arguments
cml_args = arg_parser.parse_args()
if cml_args.fireball:
print('FireballDetection')
else:
print('MeteorDetection')
# Load the config file
config = cr.loadConfigFromDirectory(cml_args.config, cml_args.dir_path)
if not os.path.exists(cml_args.dir_path):
print('{:s} directory does not exist!'.format(cml_args.dir_path))
# Load all FF files in the given directory
for file_name in os.listdir(cml_args.dir_path):
# Check if the file is an FF file
if validFFName(file_name):
# Read the FF file
ff = readFF(cml_args.dir_path, file_name)
# Skip the file if it is corrupted
if ff is None:
continue
# Use the fireball thresholding
if cml_args.fireball:
k1 = config.k1
j1 = config.j1
# Meteor detection
else:
k1 = config.k1_det
j1 = config.j1_det
# Compute the threshold value
k1_vals = (ff.maxpixel.astype(np.float64) - ff.avepixel.astype(np.float64) \
- j1)/ff.stdpixel.astype(np.float64)
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
# Plot the threshold map
k1map = ax1.imshow(k1_vals, cmap='inferno', vmin=1, vmax=6, aspect='auto')
# Plot file name
ax1.text(0, 0, "{:s}".format(file_name), color='white', verticalalignment='top')
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
if cml_args.fireball:
plt.colorbar(k1map, cax=cbar_ax, label='Top plot: k1')
else:
plt.colorbar(k1map, cax=cbar_ax, label='Top plot: k1_det')
# Plot thresholded image
threshld = ff.maxpixel > ff.avepixel + k1*ff.stdpixel + j1
ax2.imshow(threshld, cmap='gray', aspect='auto')
ax2.text(0, 0, "k1 = {:.2f}, j1 = {:.2f}".format(k1, j1), color='red', verticalalignment='top',
weight='bold')
fig.subplots_adjust(right=0.8, hspace=0)
plt.show()
| gpl-3.0 |
RoxanneYang/TestCase | WindAdapter/api.py | 1 | 5083 | # -*- coding: utf-8 -*-
import os
import datetime
import pandas as pd
from toolz import merge
from argcheck import expect_types
from WindAdapter.factor_loader import FactorLoader
from WindAdapter.utils import save_data_to_file
from WindAdapter.utils import print_table
from WindAdapter.utils import handle_wind_query_exception
from WindAdapter.custom_logger import CustomLogger
from WindAdapter.data_provider import WindDataProvider
from WindAdapter.helper import WindQueryHelper
from WindAdapter.enums import OutputFormat
LOGGER = CustomLogger()
WIND_DATA_PRODIVER = WindDataProvider()
WIND_QUERY_HELPER = WindQueryHelper()
def reset_log_level(log_level):
"""
:param log_level: enum, one of 'info', 'critical' or 'notset'
:return: sets the verbosity of WindAdapter log output; the project default is the 'info' level
"""
LOGGER.critical('Reset log level to {0}'.format(log_level))
LOGGER.set_level(log_level)
def reset_data_dict_path(path, path_type_abs):
"""
:param path: str, custom path of the data_dict file
:param path_type_abs: bool, True: the path is an absolute path, False: the path is a relative path
:return: the path of the data_dict is updated
"""
LOGGER.critical('Reset path of data dict to {0}'.format(path))
os.environ['DATA_DICT_PATH'] = path
os.environ['DATA_DICT_PATH_TYPE_ABS'] = str(path_type_abs)
return
@handle_wind_query_exception(LOGGER)
def get_universe(index_id, date=None, output_weight=False):
"""
:param index_id: str, an index code or 'fullA' (i.e. the whole A-share market), case-insensitive
:param date: str, optional, YYYYMMDD/YYYY-MM-DD, defaults to None, i.e. return the constituents of the most recent trading day
:param output_weight: bool, optional, whether to also return the weight of each constituent stock
:return: if output_weight=False, returns a list of constituent stocks
if output_weight=True, returns a DataFrame
"""
LOGGER.info('Loading the constituent stocks of index {0} at date {1}'.
format(index_id, datetime.date.today() if date is None else date))
ret = WindDataProvider.get_universe(index_id, date, output_weight)
LOGGER.info('Number of the loaded constituent stocks is {0}'.format(len(ret)))
return ret
@handle_wind_query_exception(LOGGER)
@expect_types(factor_name=(str, list))
def factor_load(start_date, end_date, factor_name, save_file=None, **kwargs):
"""
:param start_date: str, start date for loading the factor data
:param end_date: str, end date for loading the factor data
:param factor_name: str, factor name, case-insensitive
:param save_file: str, optional, file name for saving the data, either '*.csv' or '*.pkl'
:param kwargs: dict, optional
freq: str, optional, frequency of the factor data, one of 'M', 'W', 'Q', 'S', 'Y', see enums.py - FreqType
tenor: str, optional, look-back period of the factor data; for interval data (e.g. turnover, returns) a (backward) data range must be given, as number + FreqType, e.g. '1Q'
sec_id, str/list, optional, stock codes or an index code
output_data_format: enum, optional, see enums.py - OutputFormat
MULTI_INDEX_DF: multi-index DataFrame, index=[date, secID], value = factor
PITVOT_TABLE_DF: DataFrame, index=date, columns = secID
is_index: bool, optional, True: sec_id is an index code, and what is actually loaded is the factor data of its constituent stocks,
False: load the factor data of sec_id directly
:return: pd.DataFrame, the formatted factor data
"""
if isinstance(factor_name, list):
kwargs = merge(kwargs, {'output_data_format': OutputFormat.MULTI_INDEX_DF})
factor_names = factor_name
else:
factor_names = [factor_name]
ret = pd.DataFrame()
for factor_name in factor_names:
LOGGER.info('Loading factor data {0}'.format(factor_name))
factor_loader = FactorLoader(start_date=start_date,
end_date=end_date,
factor_name=factor_name,
**kwargs)
factor_data = factor_loader.load_data()
LOGGER.info('factor data {0} is loaded '.format(factor_name))
ret = pd.concat([ret, factor_data], axis=1)
if kwargs.get('reset_col_names'):
ret.columns = factor_names
if save_file:
save_data_to_file(ret, save_file)
LOGGER.critical('Data saved in {0}'.format(save_file))
return ret
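# Hedged usage sketch (added for illustration, not part of the original
# module; running it requires a live Wind terminal session, so it is left as
# a comment).  The factor name 'PB', the dates and the index code are
# assumptions - factor_help() lists the names actually defined in the data
# dictionary:
#
#     pb = factor_load('2016-01-01', '2016-06-30', 'PB',
#                      sec_id='000300.SH', is_index=True, freq='M',
#                      save_file='pb.csv')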
def factor_help():
"""
:return: the defined data dictionary (simplified version)
"""
LOGGER.info('Factors that are available to query')
data_dict = WIND_QUERY_HELPER.data_dict
print_table(data_dict['explanation'], name='Data_Dict')
return
def factor_details_help():
"""
:return: the defined data dictionary (detailed version)
"""
LOGGER.info('Factors(details) that are available to query')
data_dict = WIND_QUERY_HELPER.data_dict
print_table(data_dict, name='Data_Dict')
return
| mit |
muxiaobai/CourseExercises | python/kaggle/data-visual/plot%26seaborn.py | 1 | 2005 |
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pandas import Series,DataFrame
import seaborn as sns
# In[2]:
#https://www.kaggle.com/residentmario/bivariate-plotting-with-pandas/data
reviews = pd.read_csv("winemag-data_first150k.csv", index_col=0)
reviews.head()
# ### sns.countplot() sns.kdeplot() (kernel density estimation) sns.jointplot() sns.boxplot() sns.violinplot()
# In[4]:
sns.countplot(reviews['points'])
#reviews['points'].value_counts().sort_index().plot.bar()
plt.show()
# In[5]:
sns.kdeplot(reviews.query('price < 200').price)
#reviews[reviews['price'] < 200]['price'].value_counts().sort_index().plot.line()
plt.show()
# In[6]:
# the line plot looks jagged
reviews[reviews['price'] < 200]['price'].value_counts().sort_index().plot.line()
plt.show()
# In[7]:
# relationship between the two variables
sns.kdeplot(reviews[reviews['price'] < 200].loc[:, ['price', 'points']].dropna().sample(5000))
plt.show()
# In[8]:
sns.distplot(reviews['points'], bins=10, kde=False)
#reviews[reviews['price'] < 200]['price'].plot.hist() 对应直方图
plt.show()
# ### jointplot 对应 kind=scatter/reg/hex/kde
# In[12]:
sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100])
plt.show()
# In[10]:
sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100], kind='hex',
gridsize=20)
plt.show()
# In[15]:
sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100], kind='reg')
plt.show()
# In[16]:
sns.jointplot(x='price', y='points', data=reviews[reviews['price'] < 100], kind='kde',
gridsize=20)
plt.show()
# In[19]:
df = reviews[reviews.variety.isin(reviews.variety.value_counts().head(5).index)]
sns.boxplot(x='variety', y='points', data=df)
plt.show()
# #### The Red Blend variety scores a bit higher than the Chardonnay variety
# In[20]:
sns.violinplot( x='variety',y='points',data=reviews[reviews.variety.isin(reviews.variety.value_counts()[:5].index)])
plt.show()
| gpl-2.0 |
airanmehr/bio | Scripts/Plasmodium/Plot.py | 1 | 15347 | '''
Copyleft Oct 10, 2015 Arya Iranmehr, PhD Student, Bafna's Lab, UC San Diego, Email: [email protected]
'''
import re
import pandas as pd
import pylab as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.cm import *
from sklearn import decomposition
from Scripts.Plasmodium import Data
numGenerations=int(53000*8765.81 /48)
numGenerations=int(53000*2)
# print 'Num Generation= {:.0e}'.format(numGenerations)
numBP=21647181.0
class Plot:
# @staticmethod
def __init__(self,param):
self.titleSize=50
self.titleSizeSup=34
self.figsize,self.dpi=(30,10),100
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size':26}) ;
mpl.rc('text', usetex=True)
self.pdf= PdfPages(param['plotpath']+'{}.{}.{}.{}.pdf'.format(param['Region'],param['dsname'],param['filter'],param['HWcutoff']))
self.fignumber=1
@staticmethod
def plotDepth(meta,param):
from scipy.stats import ks_2samp
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size':26}) ;
mpl.rc('text', usetex=True)
with PdfPages(param['plotpath']+'Depth.{}.{}.pdf'.format(param['Region'],param['dsname'])) as pdf:
fig=plt.figure(figsize=(30,10), dpi=100);plt.ioff()
plt.subplot(1,2,0)
from Run import computeFws
computeFws(meta, param).plot(kind='bar');plt.title('Average Fws over {} sites'.format(meta.shape[0]));
plt.subplot(1,2,1)
meta['hetero'].value_counts().sort_index().plot(kind='bar');plt.ylabel('Number of Sites');plt.xlabel('Number of Heterozygotes');plt.title('Heterozygote Histogram of {} sites'.format(meta.shape[0]))
pdf.savefig(fig)
tt=[ks_2samp(meta[meta[n+'minrc']>0][n+'minrc'].values,meta[meta[n+'minrc']>0][n+'majrc'].values) for n in param['names']]
fig=plt.figure(figsize=(30,10), dpi=100);plt.ioff()
plt.subplot(1,2,0)
pd.Series(map(lambda x: x[0],tt), index=param['names']).plot(kind='bar');plt.title('KS-statistic')
plt.subplot(1,2,1)
np.log(pd.Series(map(lambda x: x[1],tt), index=param['names'])).plot(kind='bar');plt.title('log-Pval')
plt.suptitle('Comparison of Minor and Major read counts in Hetero sites (using KS test)')
pdf.savefig(fig)
for n in param['names']:
fig=plt.figure(figsize=(30,10), dpi=100);plt.ioff()
# plt.subplot(1,3,0)
ind=meta[[n, n+'majrc',n+'minrc']]
homo=(ind[n]=='0/0') | (ind[n]=='1/1')
hetero=~homo
rc= pd.DataFrame([meta[n+'rc'].value_counts().sort_index().iloc[:100], meta[homo][n+'rc'].value_counts().sort_index().iloc[:100],meta[hetero][n+'rc'].value_counts().sort_index().iloc[:100]],index=['AllSites','HomozygoteSites','HeteroygoteSites']).fillna(0).T
means=[meta[n+'rc'].mean(), meta[homo][n+'rc'].mean(),meta[hetero][n+'rc'].mean()]
rc= rc[rc.index<50]
index, bar_width = rc.index.values,0.25
for i in range(rc.shape[1]):
plt.bar(index + i*bar_width, rc.icol(i).values/rc.icol(i).sum(), bar_width, color=mpl.cm.jet(1.*i/rc.shape[1]), label='{} (mean depth={:.2f})'.format(rc.columns[i],means[i]))
plt.ylabel('fraction of sites');plt.xlabel('depth');plt.title('Depth of distribution of {}'.format(n));plt.legend()
# plt.subplot(1,3,1)
# minrc=meta.ix[hetero,n+'minrc']
# minrc[minrc>20]=20
# minrc.value_counts().sort_index().plot(kind='bar');plt.title('Depth of minor allele in {} heterozygote sites'.format(hetero.sum()));plt.xlabel('Depth');plt.ylabel('Number of sites')
# r=meta[n+'minrc']/meta[n+'majrc']
# plt.subplot(1,3,2)
# r[r>0].hist();plt.title('Soft AFS of Heterozygote sites '); plt.xlabel('minrc/majrc')
# plt.suptitle(n)
pdf.savefig(fig)
def plotOne(self,x,ys,ynames,title,chromOffset,ylim=None):
fig=plt.figure(figsize=(100,10), dpi=300);plt.ioff()
for y,yname in zip(ys,ynames):
plt.plot(x,y,label=yname)
# plt.ylim([0,theta.max()])
plt.xlim([0,x[-1]])
if ylim:
plt.ylim(ylim)
plt.xlabel('Position')
self.plotChromBorders(chromOffset)
plt.legend()
plt.grid()
plt.title('Figure {}. {}'.format(self.fignumber, title),fontsize=self.titleSize); self.pdf.savefig(fig);self.fignumber+=1
def plotSpectrum(self,spectrum,title):
fig=plt.figure(figsize=self.figsize, dpi=self.dpi);plt.ioff()
index, bar_width = spectrum.index.values,0.2
for i in range(spectrum.shape[1]):
plt.bar(index + i*bar_width, spectrum.icol(i).values, bar_width, color=mpl.cm.jet(1.*i/spectrum.shape[1]), label=spectrum.columns[i])
plt.xlabel('Allele') ;plt.xticks(index + 3*bar_width, index) ;plt.legend();
plt.title('Figure {}. {}'.format(self.fignumber, title),fontsize=self.titleSize); self.pdf.savefig(fig);self.fignumber+=1
def subsample(self,decay):
w=100
r=np.arange(0,10000,w)
r[0]=1
for s in r:
decay.loc[s:s+w,'meanRho2']=np.mean(decay.iloc[s:s+w].meanRho2.values)
decay.loc[s:s+w,'l']=np.mean(decay.iloc[s:s+w].l.values)
return decay.iloc[r[:-1]+w/2]
def plotDecay(self,decay,title):
fig=plt.figure(figsize=self.figsize, dpi=self.dpi);plt.ioff()
decay=self.subsample(decay.copy())
plt.plot(decay.index,decay.meanRho2); plt.xlabel('Distance (BP)'); plt.ylabel(r'$\rho^2$'); plt.xlim([-100,decay.meanRho2.index.max()]);plt.grid();
plt.title('Figure {}. {}'.format(self.fignumber, title),fontsize=self.titleSize); self.pdf.savefig(fig);self.fignumber+=1
def getPvalTitle(self,p):
if p>=0:
title= ' larger than {} '.format(p)
else:
title= ' less than {} '.format(-p)
if not p:
title+='(all)'
if p==1:
title+='(exactly at HWE)'
return title
def plot(self,param):
hw=pd.read_pickle(param['outpath'] +'meta.df')
spectrum=pd.read_pickle(param['outpath'] +'afs.df')
SNP=pd.read_pickle(param['outpath'] +'snp.df')
SNP.loc['Reference0']=np.zeros(SNP.shape[1],dtype=int)
foldedSpec=self.getFoldedSpectrum(spectrum.copy())
NumSeg=SNP.shape[1]
mu=NumSeg/(2*numGenerations*numBP)
theta=pd.read_pickle(param['outpath'] +'theta.df')
chromOffset= Data.getChromOffset(theta)
x= chromOffset.loc[theta[theta.parameter=='Theta']['#CHROM'].values].values +theta[theta.parameter=='Theta'].start.values +param['windowSize']/2
theta, pi, pis,pin, S, tajimaD = theta[theta.parameter=='Theta'].estimate.values, theta[theta.parameter=='Pi'].estimate.values, theta[theta.parameter=='PiS'].estimate.values, theta[theta.parameter=='PiN'].estimate.values, theta[theta.parameter=='S'].estimate.values, theta[theta.parameter=='TajimaD'].estimate.fillna(0).values
Ne,thetaBar=theta/(np.log(11)*mu*2*param['windowSize']),np.mean(theta)/param['windowSize']
# self.plotPval(hw, r'HW: {} sites with pval {} are used in this analysis.'.format(sum(hw.pval>=param['HWcutoff']), self.getPvalTitle(param['HWcutoff'])))
if param['filter']=='homo':
PCAD=self.plotGenotype(hw,SNP, r'Genotypes of {} variants ({})'.format(NumSeg, ('All','Filtered by Elizabeth paper')[param['dsname']=='winzeler'] ))
self.plotGeometry(SNP,PCAD, 'Geometry of Haplotypes')
self.plotSpectrum(spectrum/spectrum.sum(), r'Allele Spectrum Frequency ($\xi_i$) of {} sites'.format(SNP.shape[1]))
self.plotSpectrum(pd.DataFrame(spectrum.SYNONYMOUS/spectrum.NONSYNONYMOUS,columns=['SYNONYMOUS/NONSYNONYMOUS']), 'Ratio Between Syn and Nonsyn')
spectrum=spectrum.apply(lambda x: x*spectrum.index.values, axis=0)
self.plotSpectrum(spectrum/spectrum.sum(), r'Scaled Allele Spectrum Frequency ($i\xi_i$)')
# self.plotSpectrum(foldedSpec, 'Folded Allele Spectrum Counts')
self.plotSpectrum(foldedSpec/foldedSpec.sum(), r'Folded Allele Spectrum Frequency ($\xi_i$)')
# foldedSpec=foldedSpec.apply(lambda x: x*foldedSpec.index.values, axis=0)
# self.plotSpectrum(foldedSpec/foldedSpec.sum() , r'Scaled Folded Allele Spectrum Frequency ($i\xi_i \propto \theta$)')
# self.plotTajima(tajimaD, pi)
# if param['filter']=='homo':
# decay=pd.read_pickle(param['outpath'] +'decay.df')
# rho=pd.read_pickle(param['outpath'] +'rho.df')
# self.plotDecay(decay,r'(Average on the Genome) Decay of $\rho^2$ relative to examining site')
# self.plotOne(rho.POSGlobal.values, [rho.rho.values], r'$\rho^2$', r'$\rho^2$ for all {} sites. (for each site avereged over windowSize=3k)'.format(NumSeg), chromOffset)
#
# self.plotOne(x, [S,pi,pis,pin, theta], [r'$S_n$',r'$\pi$',r'$\pi_S$',r'$\pi_N$',r'$\theta$'], title=r'Scaled Mutation Rate Estimates for Window size of {}'.format( param['windowSize']), chromOffset=chromOffset, ylim=[0,50])
# self.plotOne(x, [tajimaD], [r'$D$'], title='Tajimas D for Window size of {}'.format( param['windowSize']), chromOffset=chromOffset)
# self.plotOne(x, [Ne], [r'$N_e$'], title=r'$ N_e = \frac{\theta}{2\mu}$ Estimates for windowSize='+'{}'.format( param['windowSize'] ) + r' ($\mu=\frac{S}{2t\ell}$='+'{:.1e}, $S=${}, $t$={:.0e}, $\ell$={:.0e})'.format(mu,NumSeg,numGenerations,numBP) +r'($\bar\theta$={:.0e}'.format(thetaBar) + r'$\Rightarrow \bar N_e$={:.0e})'.format(thetaBar/(2*mu) ), chromOffset=chromOffset,ylim=[0,50000])
self.pdf.close()
def plotGenotype(self,hw,SNP,title):
fig=plt.figure(figsize=self.figsize, dpi=self.dpi);plt.ioff()
ax=fig.add_subplot(1, 2, 1, projection='3d')
ax.view_init(elev=48, azim=134)
pca = decomposition.PCA(n_components=3)
pca.fit(SNP.values)
X = pca.transform(SNP.values)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=map(lambda x: int(re.search(r'\d+', x).group()), SNP.index), cmap=plt.cm.spectral, s=100*np.ones(12))
for i in range(SNP.shape[0]):
ax.text3D(X[i, 0]+ 5, X[i, 1] , X[i, 2], SNP.index.values[i].replace('mdio',''),horizontalalignment='center',bbox=dict(alpha=.2, edgecolor='w', facecolor='w'),fontsize=20)
        ax.set_xlabel('Principal Component 1'); ax.set_ylabel('Principal Component 2');ax.set_zlabel('Principal Component 3');plt.title('Representation via PCA')
D=pd.DataFrame(None,index=SNP.index,columns=SNP.index)
for i,iname in enumerate(SNP.index):
for j,jname in enumerate(SNP.index):
D.loc[iname,jname]= np.linalg.norm(X[i,:]-X[j,:])
# pd.Series.plot(hw[['0/0','0/1','0/2','1/1','1/2','2/2']].sum(),kind='bar',color=[mpl.cm.jet(1.*i/6) for i in range(6)],title='All Genotype')
        plt.subplot(1,2,2)
a=hw[['0/0','0/1','1/1']]
        pd.Series.plot(a[(a.iloc[:,0]!=6 )&(a.iloc[:,2]!=6 )].sum(),kind='bar',color=[mpl.cm.jet(1.*i/3) for i in range(3)],title='Bi-allelic Genotype (only polymorphic sites)')
plt.suptitle('Figure {}. {}'.format(self.fignumber, title),fontsize=self.titleSizeSup); self.pdf.savefig(fig);self.fignumber+=1
return D
def plotTajima(self,tajimaD,pi):
fig=plt.figure(figsize=self.figsize, dpi=self.dpi);plt.ioff()
plt.subplot(1,2,1)
plt.hist(pi, bins=50, normed=True)
plt.title('$\pi$')
        plt.subplot(1,2,2)
plt.hist(tajimaD, bins=50, normed=True)
plt.title('Tajima D')
plt.suptitle('Figure {}. {}'.format(self.fignumber, 'Histogram of Tajima Statistics'),fontsize=self.titleSizeSup); self.pdf.savefig(fig);self.fignumber+=1
def plotPval(self,meta,title):
fig=plt.figure(figsize=self.figsize, dpi=self.dpi);plt.ioff()
plt.subplot(1,2,1)
pd.Series.plot(meta.f.round(2).value_counts().sort_index(),kind='bar',color=[mpl.cm.jet(1.*i/meta.f.value_counts().shape[0]) for i in range(meta.f.value_counts().shape[0])],title=r'$F$ Inbreeding (Negative F = Deficit of Homozygotes! )')
plt.xlabel(r'$F$'); plt.ylabel('Number of Sites')
        plt.subplot(1,2,2)
pd.Series.plot(meta.pval.round(2).value_counts().sort_index(),kind='bar',color=[mpl.cm.jet(1.*i/meta.pval.value_counts().shape[0]) for i in range(meta.pval.value_counts().shape[0])],title=r'$\mathcal{X}^2$ p-value of HWE')
        plt.xlabel(r'P-value'); plt.ylabel('Number of Sites')
plt.suptitle('Figure {}. {}'.format(self.fignumber, title),fontsize=self.titleSizeSup); self.pdf.savefig(fig);self.fignumber+=1
def plotChromBorders(self,chromOffset):
min_y,max_y=plt.ylim()
ypos=max_y - (max_y-min_y)*0.1
for k,v in chromOffset.iteritems():
plt.plot([v ,v],[min_y,max_y],'k-.',lw=1)
if k != chromOffset.index.max() :
plt.annotate('CH{}'.format(k),xy=(v, ypos) )
plt.xticks(chromOffset.values)
def getFoldedSpectrum(self,spectrum):
        for i in range(1,spectrum.shape[0]//2+1):
spectrum.loc[i]+=spectrum.loc[spectrum.index.max() +1 - i]
        spectrum=spectrum.iloc[:spectrum.shape[0]//2+1]
return spectrum
def plotGeometry(self,SNP,PCAD,title):
fig=plt.figure(figsize=self.figsize, dpi=self.dpi);plt.ioff()
SNP.loc['Reference0']=np.zeros(SNP.shape[1],dtype=int)
plt.subplot(1,2,1)
D=pd.DataFrame(None,index=SNP.index,columns=SNP.index)
for i in SNP.index:
for j in SNP.index:
D.loc[i,j]= sum(np.logical_xor(SNP.loc[i],SNP.loc[j]))
D=D.astype(float)
im=plt.imshow(D,interpolation='nearest',cmap='Reds')
plt.gca().xaxis.tick_top()
x=np.arange(D.index.shape[0])
plt.yticks(x,map(lambda x: x.replace('mdio','') ,D.index.values))
plt.xticks(x,map(lambda x: x.replace('mdio','') ,D.columns))
plt.colorbar(im)
plt.gca().tick_params(axis='both', which='major', labelsize=10)
plt.title('Pairwise Hamming Distance',y=1.03)
        plt.subplot(1,2,2)
D=PCAD.astype(float)
im=plt.imshow(D,interpolation='nearest',cmap='Reds')
plt.gca().xaxis.tick_top()
x=np.arange(D.index.shape[0])
plt.yticks(x,map(lambda x: x.replace('mdio','') ,D.index.values))
plt.xticks(x,map(lambda x: x.replace('mdio','') ,D.columns))
plt.colorbar(im)
plt.gca().tick_params(axis='both', which='major', labelsize=10)
plt.title('Euclidean Distance in PC3',y=1.03)
plt.suptitle('Figure {}. {}'.format(self.fignumber, title),fontsize=self.titleSizeSup); self.pdf.savefig(fig);self.fignumber+=1
def figureWithMargin(self):
plt.figure(figsize=(10,20));plt.axes([0.0, 0.0, 1, 0.7]);plt.plot(np.random.rand(2));
| mit |
xavierwu/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
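# Side note (illustrative, not from the original example): sample_weight
# effectively rescales C per sample, so uniformly scaling every weight by a
# factor k is expected to behave like using C * k with unit weights, e.g.
#
#   svm.SVC(C=1.0).fit(X, Y, sample_weight=10 * sample_weight_constant)
#   svm.SVC(C=10.0).fit(X, Y, sample_weight=sample_weight_constant)
#
# should produce (numerically) the same decision function.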
# fit the model twice, once with the modified per-sample weights and once
# with constant (unit) weights, for comparison
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
Dih5/xpecgen | xpecgen/xpecgen.py | 1 | 33042 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""xpecgen.py: A module to calculate x-ray spectra generated in tungsten anodes."""
from __future__ import print_function
import math
from bisect import bisect_left
import os
from glob import glob
import warnings
import csv
import numpy as np
from scipy import interpolate, integrate, optimize
import xlsxwriter
try:
import matplotlib.pyplot as plt
plt.ion()
plot_available = True
except ImportError:
warnings.warn("Unable to import matplotlib. Plotting will be disabled.")
plot_available = False
__author__ = 'Dih5'
__version__ = "1.3.0"
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
# --------------------General purpose functions-------------------------#
def log_interp_1d(xx, yy, kind='linear'):
"""
Perform interpolation in log-log scale.
Args:
xx (List[float]): x-coordinates of the points.
yy (List[float]): y-coordinates of the points.
kind (str or int, optional): The kind of interpolation in the log-log domain. This is passed to
scipy.interpolate.interp1d.
Returns:
A function whose call method uses interpolation in log-log scale to find the value at a given point.
"""
log_x = np.log(xx)
log_y = np.log(yy)
# No big difference in efficiency was found when replacing interp1d by
# UnivariateSpline
lin_interp = interpolate.interp1d(log_x, log_y, kind=kind)
return lambda zz: np.exp(lin_interp(np.log(zz)))
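# Illustrative usage sketch, not part of the upstream module: log-log
# interpolation of a made-up power-law curve y = 5 / x. Because the data is an
# exact power law, the interpolant reproduces it between nodes (f(2) ~ 2.5,
# f(20) ~ 0.25). The numbers are arbitrary and only show the call pattern.
def _log_interp_1d_example():
    xs = [1.0, 10.0, 100.0]
    ys = [5.0, 0.5, 0.05]
    f = log_interp_1d(xs, ys)
    return f(np.array([2.0, 20.0]))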
# This custom implementation of dblquad is based in the one in numpy
# (Cf. https://github.com/scipy/scipy/blob/v0.16.1/scipy/integrate/quadpack.py#L449 )
# It was modified to work only in rectangular regions (no g(x) nor h(x))
# to set the inner integral epsrel
# and to increase the limit of points taken
def _infunc(x, func, c, d, more_args, epsrel):
myargs = (x,) + more_args
return integrate.quad(func, c, d, args=myargs, epsrel=epsrel, limit=2000)[0]
def custom_dblquad(func, a, b, c, d, args=(), epsabs=1.49e-8, epsrel=1.49e-8, maxp1=50, limit=2000):
"""
A wrapper around numpy's dblquad to restrict it to a rectangular region and to pass arguments to the 'inner'
integral.
Args:
func: The integrand function f(y,x).
a (float): The lower bound of the second argument in the integrand function.
b (float): The upper bound of the second argument in the integrand function.
c (float): The lower bound of the first argument in the integrand function.
d (float): The upper bound of the first argument in the integrand function.
args (sequence, optional): extra arguments to pass to func.
epsabs (float, optional): Absolute tolerance passed directly to the inner 1-D quadrature integration.
epsrel (float, optional): Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
maxp1 (float or int, optional): An upper bound on the number of Chebyshev moments.
limit (int, optional): Upper bound on the number of cycles (>=3) for use with a sinusoidal weighting and an
infinite end-point.
Returns:
(tuple): tuple containing:
y (float): The resultant integral.
abserr (float): An estimate of the error.
"""
return integrate.quad(_infunc, a, b, (func, c, d, args, epsrel),
epsabs=epsabs, epsrel=epsrel, maxp1=maxp1, limit=limit)
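# Illustrative usage sketch, not part of the upstream module: integrate
# f(y, x) = x * y over x in [0, 1] and y in [0, 2]. The exact result is 1.0,
# so the returned estimate should be very close to that.
def _custom_dblquad_example():
    value, abserr = custom_dblquad(lambda y, x: x * y, 0.0, 1.0, 0.0, 2.0)
    return value, abserr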
def triangle(x, loc=0, size=0.5, area=1):
"""
The triangle window function centered in loc, of given size and area, evaluated at a point.
Args:
x: The point where the function is evaluated.
loc: The position of the peak.
        size: The half-width of the support (the function is zero outside [loc - size, loc + size]).
area: The area below the function.
Returns:
The value of the function.
"""
# t=abs((x-loc)/size)
# return 0 if t>1 else (1-t)*abs(area/size)
return 0 if abs((x - loc) / size) > 1 else (1 - abs((x - loc) / size)) * abs(area / size)
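# Worked check, not part of the upstream module: with loc=59, size=0.5 and
# area=1 the window peaks at height area / size = 2 at x = loc and is zero
# outside [loc - size, loc + size], so triangle(59.0, ...) == 2.0 while
# triangle(60.0, ...) == 0.
def _triangle_example():
    return triangle(59.0, loc=59.0, size=0.5, area=1.0), triangle(60.0, loc=59.0, size=0.5, area=1.0)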
# --------------------Spectrum model functionality----------------------#
class Spectrum:
"""
Set of 2D points and discrete components representing a spectrum.
    A Spectrum can be multiplied by a scalar (int, float...) to scale its counts by that factor.
Two spectra can be added if they share their x axes and their discrete component positions.
    Note: When two spectra are added, it is not checked that the addition makes sense. It is the user's responsibility to
do so.
Attributes:
x (:obj:`numpy.ndarray`): x coordinates (energy) describing the continuum part of the spectrum.
y (:obj:`numpy.ndarray`): y coordinates (pdf) describing the continuum part of the spectrum.
discrete (List[List[float]]): discrete components of the spectrum, each of the form [x, num, rel_x] where:
* x is the mean position of the peak.
* num is the number of particles in the peak.
* rel_x is a characteristic distance where it should extend. The exact meaning depends on the windows function.
"""
def __init__(self):
"""
Create an empty spectrum.
"""
self.x = []
self.y = []
self.discrete = []
def clone(self):
"""
Return a new Spectrum object cloning itself
Returns:
:obj:`Spectrum`: The new Spectrum.
"""
s = Spectrum()
s.x = list(self.x)
s.y = self.y[:]
s.discrete = []
for a in self.discrete:
s.discrete.append(a[:])
return s
def get_continuous_function(self):
"""
Get a function representing the continuous part of the spectrum.
Returns:
An interpolation function representing the continuous part of the spectrum.
"""
return interpolate.interp1d(self.x, self.y, bounds_error=False, fill_value=0)
def get_points(self, peak_shape=triangle, num_discrete=10):
"""
Returns two lists of coordinates x y representing the whole spectrum, both the continuous and discrete components.
The mesh is chosen by extending x to include details of the discrete peaks.
Args:
peak_shape: The window function used to calculate the peaks. See :obj:`triangle` for an example.
num_discrete: Number of points that are added to mesh in each peak.
Returns:
(tuple): tuple containing:
x2 (List[float]): The list of x coordinates (energy) in the whole spectrum.
y2 (List[float]): The list of y coordinates (density) in the whole spectrum.
"""
if peak_shape is None or self.discrete == []:
return self.x[:], self.y[:]
# A mesh for each discrete component:
discrete_mesh = np.concatenate(list(map(lambda x: np.linspace(
x[0] - x[2], x[0] + x[2], num=num_discrete, endpoint=True), self.discrete)))
x2 = sorted(np.concatenate((discrete_mesh, self.x)))
f = self.get_continuous_function()
peak = np.vectorize(peak_shape)
def g(x):
t = 0
for l in self.discrete:
t += peak(x, loc=l[0], size=l[2]) * l[1]
return t
y2 = [f(x) + g(x) for x in x2]
return x2, y2
def get_plot(self, place, show_mesh=True, prepare_format=True, peak_shape=triangle):
"""
Prepare a plot of the data in the given place
Args:
place: The class whose method plot is called to produce the plot (e.g., matplotlib.pyplot).
show_mesh (bool): Whether to plot the points over the continuous line as circles.
prepare_format (bool): Whether to include ticks and labels in the plot.
peak_shape: The window function used to plot the peaks. See :obj:`triangle` for an example.
"""
if prepare_format:
place.tick_params(axis='both', which='major', labelsize=10)
place.tick_params(axis='both', which='minor', labelsize=8)
place.set_xlabel('E', fontsize=10, fontweight='bold')
place.set_ylabel('f(E)', fontsize=10, fontweight='bold')
x2, y2 = self.get_points(peak_shape=peak_shape)
if show_mesh:
place.plot(self.x, self.y, 'bo', x2, y2, 'b-')
else:
place.plot(x2, y2, 'b-')
def show_plot(self, show_mesh=True, block=True):
"""
Prepare the plot of the data and show it in matplotlib window.
Args:
show_mesh (bool): Whether to plot the points over the continuous line as circles.
block (bool): Whether the plot is blocking or non blocking.
"""
if plot_available:
plt.clf()
self.get_plot(plt, show_mesh=show_mesh, prepare_format=False)
plt.xlabel("E")
plt.ylabel("f(E)")
plt.gcf().canvas.set_window_title("".join(('xpecgen v', __version__)))
plt.show(block=block)
else:
warnings.warn("Asked for a plot but matplotlib could not be imported.")
def export_csv(self, route="a.csv", peak_shape=triangle, transpose=False):
"""
Export the data to a csv file (comma-separated values).
Args:
route (str): The route where the file will be saved.
peak_shape: The window function used to plot the peaks. See :obj:`triangle` for an example.
transpose (bool): True to write in two columns, False in two rows.
"""
x2, y2 = self.get_points(peak_shape=peak_shape)
with open(route, 'w') as csvfile:
w = csv.writer(csvfile, dialect='excel')
if transpose:
w.writerows([list(a) for a in zip(*[x2, y2])])
else:
w.writerow(x2)
w.writerow(y2)
def export_xlsx(self, route="a.xlsx", peak_shape=triangle, markers=False):
"""
Export the data to a xlsx file (Excel format).
Args:
route (str): The route where the file will be saved.
peak_shape: The window function used to plot the peaks. See :obj:`triangle` for an example.
markers (bool): Whether to use markers or a continuous line in the plot in the file.
"""
x2, y2 = self.get_points(peak_shape=peak_shape)
workbook = xlsxwriter.Workbook(route)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
worksheet.write(0, 0, "Energy (keV)", bold)
worksheet.write(0, 1, "Photon density (1/keV)", bold)
worksheet.write_column('A2', x2)
worksheet.write_column('B2', y2)
# Add a plot
if markers:
chart = workbook.add_chart(
{'type': 'scatter', 'subtype': 'straight_with_markers'})
else:
chart = workbook.add_chart(
{'type': 'scatter', 'subtype': 'straight'})
chart.add_series({
'name': '=Sheet1!$B$1',
'categories': '=Sheet1!$A$2:$A$' + str(len(x2) + 1),
'values': '=Sheet1!$B$2:$B$' + str(len(y2) + 1),
})
chart.set_title({'name': 'Emission spectrum'})
chart.set_x_axis(
{'name': 'Energy (keV)', 'min': 0, 'max': str(x2[-1])})
chart.set_y_axis({'name': 'Photon density (1/keV)'})
chart.set_legend({'position': 'none'})
chart.set_style(11)
worksheet.insert_chart('D3', chart, {'x_offset': 25, 'y_offset': 10})
workbook.close()
def get_norm(self, weight=None):
"""
Return the norm of the spectrum using a weighting function.
Args:
weight: A function used as a weight to calculate the norm. Typical examples are:
* weight(E)=1 [Photon number]
* weight(E)=E [Energy]
* weight(E)=fluence2Dose(E) [Dose]
Returns:
(float): The calculated norm.
"""
if weight is None:
w = lambda x: 1
else:
w = weight
y2 = list(map(lambda x, y: w(x) * y, self.x, self.y))
return integrate.simps(y2, x=self.x) + sum([w(a[0]) * a[1] for a in self.discrete])
def set_norm(self, value=1, weight=None):
"""
Set the norm of the spectrum using a weighting function.
Args:
value (float): The norm of the modified spectrum in the given convention.
weight: A function used as a weight to calculate the norm. Typical examples are:
* weight(E)=1 [Photon number]
* weight(E)=E [Energy]
* weight(E)=fluence2Dose(E) [Dose]
"""
norm = self.get_norm(weight=weight) / value
self.y = [a / norm for a in self.y]
self.discrete = [[a[0], a[1] / norm, a[2]] for a in self.discrete]
def hvl(self, value=0.5, weight=lambda x: 1, mu=lambda x: 1, energy_min=0):
"""
Calculate a generalized half-value-layer.
This method calculates the depth of a material needed for a certain dosimetric magnitude to decrease in a given factor.
Args:
value (float): The factor the desired magnitude is decreased. Must be in [0, 1].
weight: A function used as a weight to calculate the norm. Typical examples are:
* weight(E)=1 [Photon number]
* weight(E)=E [Energy]
* weight(E)=fluence2Dose(E) [Dose]
mu: The energy absorption coefficient as a function of energy.
energy_min (float): A low-energy cutoff to use in the calculation.
Returns:
(float): The generalized hvl in cm.
"""
# TODO: (?) Cut characteristic if below cutoff. However, such a high cutoff
# would probably make no sense
# Use low-energy cutoff
low_index = bisect_left(self.x, energy_min)
x = self.x[low_index:]
y = self.y[low_index:]
# Normalize to 1 with weighting function
y2 = list(map(lambda a, b: weight(a) * b, x, y))
discrete2 = [weight(a[0]) * a[1] for a in self.discrete]
n2 = integrate.simps(y2, x=x) + sum(discrete2)
y3 = [a / n2 for a in y2]
discrete3 = [[a[0], weight(a[0]) * a[1] / n2] for a in self.discrete]
# Now we only need to add attenuation as a function of depth
f = lambda t: integrate.simps(list(map(lambda a, b: b * math.exp(-mu(a) * t), x, y3)), x=x) + sum(
[c[1] * math.exp(-mu(c[0]) * t) for c in discrete3]) - value
# Search the order of magnitude of the root (using the fact that f is
# monotonically decreasing)
a = 1.0
if f(a) > 0:
while f(a) > 0:
a *= 10.0
# Now f(a)<=0 and f(a*0.1)>0
return optimize.brentq(f, a * 0.1, a)
else:
while f(a) < 0:
a *= 0.1
# Now f(a)>=0 and f(a*10)<0
return optimize.brentq(f, a, a * 10.0)
def attenuate(self, depth=1, mu=lambda x: 1):
"""
Attenuate the spectrum as if it passed thorough a given depth of material with attenuation described by a given
attenuation coefficient. Consistent units should be used.
Args:
depth: The amount of material (typically in cm).
mu: The energy-dependent absorption coefficient (typically in cm^-1).
"""
self.y = list(
map(lambda x, y: y * math.exp(-mu(x) * depth), self.x, self.y))
self.discrete = list(
map(lambda l: [l[0], l[1] * math.exp(-mu(l[0]) * depth), l[2]], self.discrete))
def __add__(self, other):
"""Add two instances, assuming that makes sense."""
if not isinstance(other, Spectrum): # so s+0=s and sum([s1, s2,...]) makes sense
return self
s = Spectrum()
s.x = self.x
s.y = [a + b for a, b in zip(self.y, other.y)]
s.discrete = [[a[0], a[1] + b[1], a[2]] for a, b in zip(self.discrete, other.discrete)]
return s
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
"""Multiply the counts by an scalar."""
s2 = self.clone()
s2.y = [a * other for a in self.y]
s2.discrete = [[a[0], a[1] * other, a[2]] for a in self.discrete]
return s2
def __rmul__(self, other):
return self.__mul__(other)
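# Illustrative usage sketch, not part of the upstream module: build a small
# artificial Spectrum, normalize it to one photon, then attenuate it through
# 0.1 cm of a material with a constant attenuation coefficient of 2 cm^-1.
# The remaining photon number should be about exp(-0.2) ~ 0.82. The numbers
# are arbitrary and only demonstrate the Spectrum API defined above.
def _spectrum_usage_example():
    s = Spectrum()
    s.x = list(np.linspace(10.0, 100.0, 10))
    s.y = [1.0] * 10
    s.set_norm(value=1.0)  # photon-number norm
    s.attenuate(depth=0.1, mu=lambda e: 2.0)
    return s.get_norm()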
# --------------------Spectrum calculation functionality----------------#
def get_fluence(e_0=100.0):
"""
Returns a function representing the electron fluence with the distance in CSDA units.
Args:
e_0 (float): The kinetic energy whose CSDA range is used to scale the distances.
Returns:
A function representing fluence(x,u) with x in CSDA units.
"""
# List of available energies
e0_str_list = list(map(lambda x: (os.path.split(x)[1]).split(".csv")[
0], glob(os.path.join(data_path, "fluence", "*.csv"))))
e0_list = sorted(list(map(int, list(filter(str.isdigit, e0_str_list)))))
e_closest = min(e0_list, key=lambda x: abs(x - e_0))
with open(os.path.join(data_path, "fluence/grid.csv"), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
x = np.array([float(a) for a in t[0].split(",")])
t = next(r)
u = np.array([float(a) for a in t[0].split(",")])
t = []
with open(os.path.join(data_path, "fluence", "".join([str(e_closest), ".csv"])), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
for row in r:
t.append([float(a) for a in row[0].split(",")])
t = np.array(t)
f = interpolate.RectBivariateSpline(x, u, t, kx=1, ky=1)
# Note f is returning numpy 1x1 arrays
return f
# return lambda x,u:f(x,u)[0]
def get_cs(e_0=100, z=74):
"""
Returns a function representing the scaled bremsstrahlung cross_section.
Args:
e_0 (float): The electron kinetic energy, used to scale u=e_e/e_0.
z (int): Atomic number of the material.
Returns:
A function representing cross_section(e_g,u) in mb/keV, with e_g in keV.
"""
# NOTE: Data is given for E0>1keV. CS values below this level should be used with caution.
# The default behaviour is to keep it constant
with open(os.path.join(data_path, "cs/grid.csv"), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
e_e = np.array([float(a) for a in t[0].split(",")])
log_e_e = np.log10(e_e)
t = next(r)
k = np.array([float(a) for a in t[0].split(",")])
t = []
with open(os.path.join(data_path, "cs/%d.csv" % z), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
for row in r:
t.append([float(a) for a in row[0].split(",")])
t = np.array(t)
scaled = interpolate.RectBivariateSpline(log_e_e, k, t, kx=3, ky=1)
m_electron = 511
z2 = z * z
return lambda e_g, u: (u * e_0 + m_electron) ** 2 * z2 / (u * e_0 * e_g * (u * e_0 + 2 * m_electron)) * (
scaled(np.log10(u * e_0), e_g / (u * e_0)))
def get_mu(z=74):
"""
Returns a function representing an energy-dependent attenuation coefficient.
Args:
z (int or str): The identifier of the material in the data folder, typically the atomic number.
Returns:
The attenuation coefficient mu(E) in cm^-1 as a function of the energy measured in keV.
"""
with open(os.path.join(data_path, "mu", "".join([str(z), ".csv"])), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
x = [float(a) for a in t[0].split(",")]
t = next(r)
y = [float(a) for a in t[0].split(",")]
return log_interp_1d(x, y)
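# Illustrative usage sketch, not part of the upstream module: fractional
# transmission of 60 keV photons through 0.2 cm of aluminium, assuming the
# bundled attenuation data includes z=13. Transmission is exp(-mu(E) * depth).
def _get_mu_example():
    mu_al = get_mu(13)
    return np.exp(-mu_al(60.0) * 0.2)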
def get_csda(z=74):
"""
    Returns a function representing the CSDA range for the given material.
Args:
z (int): Atomic number of the material.
Returns:
        The CSDA range in cm as a function of the electron kinetic energy in keV.
"""
with open(os.path.join(data_path, "csda/%d.csv" % z), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
x = [float(a) for a in t[0].split(",")]
t = next(r)
y = [float(a) for a in t[0].split(",")]
return interpolate.interp1d(x, y, kind='linear')
def get_mu_csda(e_0, z=74):
"""
    Returns a function representing the CSDA-scaled energy-dependent attenuation coefficient for the given material.
Args:
e_0 (float): The electron initial kinetic energy.
z (int): Atomic number of the material.
Returns:
The attenuation coefficient mu(E) in CSDA units as a function of the energy measured in keV.
"""
mu = get_mu(z)
csda = get_csda(z=z)(e_0)
return lambda e: mu(e) * csda
def get_fluence_to_dose():
"""
Returns a function representing the weighting factor which converts fluence to dose.
Returns:
A function representing the weighting factor which converts fluence to dose in Gy * cm^2.
"""
with open(os.path.join(data_path, "fluence2dose/f2d.csv"), 'r') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|',
quoting=csv.QUOTE_MINIMAL)
t = next(r)
x = [float(a) for a in t[0].split(",")]
t = next(r)
y = [float(a) for a in t[0].split(",")]
return interpolate.interp1d(x, y, kind='linear')
def get_source_function(fluence, cs, mu, theta, e_g, phi=0.0):
"""
Returns the attenuated source function (Eq. 2 in the paper) for the given parameters.
An E_0-dependent factor (the fraction found there) is excluded. However, the E_0 dependence is removed in
integrate_source.
Args:
fluence: The function representing the fluence.
cs: The function representing the bremsstrahlung cross-section.
mu: The function representing the attenuation coefficient.
theta (float): The emission angle in degrees, the anode's normal being at 90º.
e_g (float): The emitted photon energy in keV.
        phi (float): The elevation angle in degrees, the anode's normal being at 0º.
Returns:
The attenuated source function s(u,x).
"""
factor = -mu(e_g) / math.sin(math.radians(theta)) / math.cos(math.radians(phi))
return lambda u, x: fluence(x, u) * cs(e_g, u) * math.exp(factor * x)
def integrate_source(fluence, cs, mu, theta, e_g, e_0, phi=0.0, x_min=0.0, x_max=0.6, epsrel=0.1, z=74):
"""
Find the integral of the attenuated source function.
An E_0-independent factor is excluded (i.e., the E_0 dependence on get_source_function is taken into account here).
Args:
fluence: The function representing the fluence.
cs: The function representing the bremsstrahlung cross-section.
mu: The function representing the attenuation coefficient.
theta (float): The emission angle in degrees, the anode's normal being at 90º.
e_g: (float): The emitted photon energy in keV.
e_0 (float): The electron initial kinetic energy.
        phi (float): The elevation angle in degrees, the anode's normal being at 0º.
x_min: The lower-bound of the integral in depth, scaled by the CSDA range.
x_max: The upper-bound of the integral in depth, scaled by the CSDA range.
epsrel: The relative tolerance of the integral.
z (int): Atomic number of the material.
Returns:
float: The value of the integral.
"""
if e_g >= e_0:
return 0
f = get_source_function(fluence, cs, mu, theta, e_g, phi=phi)
(y, y_err) = custom_dblquad(f, x_min, x_max, e_g / e_0, 1, epsrel=epsrel, limit=100)
# The factor includes n_med, its units being 1/(mb * r_CSDA). We only take into account the r_CSDA dependence.
y *= get_csda(z=z)(e_0)
return y
def add_char_radiation(s, method="fraction_above_poly"):
"""
Adds characteristic radiation to a calculated bremsstrahlung spectrum, assuming it is a tungsten-generated spectrum
If a discrete component already exists in the spectrum, it is replaced.
Args:
s (:obj:`Spectrum`): The spectrum whose discrete component is recalculated.
method (str): The method to use to calculate the discrete component. Available methods include:
* 'fraction_above_linear': Use a linear relation between bremsstrahlung above the K-edge and peaks.
* 'fraction_above_poly': Use polynomial fits between bremsstrahlung above the K-edge and peaks.
"""
s.discrete = []
if s.x[-1] < 69.51: # If under k edge, no char radiation
return
f = s.get_continuous_function()
norm = integrate.quad(f, s.x[0], s.x[-1], limit=2000)[0]
fraction_above = integrate.quad(f, 74, s.x[-1], limit=2000)[0] / norm
if method == "fraction_above_linear":
s.discrete.append([58.65, 0.1639 * fraction_above * norm, 1])
s.discrete.append([67.244, 0.03628 * fraction_above * norm, 1])
s.discrete.append([69.067, 0.01410 * fraction_above * norm, 1])
else:
if method != "fraction_above_poly":
print(
"WARNING: Unknown char radiation calculation method. Using fraction_above_poly")
s.discrete.append([58.65, (0.1912 * fraction_above - 0.00615 *
fraction_above ** 2 - 0.1279 * fraction_above ** 3) * norm, 1])
s.discrete.append([67.244, (0.04239 * fraction_above + 0.002003 *
fraction_above ** 2 - 0.02356 * fraction_above ** 3) * norm, 1])
s.discrete.append([69.067, (0.01437 * fraction_above + 0.002346 *
fraction_above ** 2 - 0.009332 * fraction_above ** 3) * norm, 1])
return
def console_monitor(a, b):
"""
Simple monitor function which can be used with :obj:`calculate_spectrum`.
Prints in stdout 'a/b'.
Args:
a: An object representing the completed amount (e.g., a number representing a part...).
b: An object representing the total amount (... of a number representing a total).
"""
print("Calculation: ", a, "/", b)
def calculate_spectrum_mesh(e_0, theta, mesh, phi=0.0, epsrel=0.2, monitor=console_monitor, z=74):
"""
Calculates the x-ray spectrum for given parameters.
Characteristic peaks are also calculated by add_char_radiation, which is called with the default parameters.
Args:
e_0 (float): Electron kinetic energy in keV
theta (float): X-ray emission angle in degrees, the normal being at 90º
mesh (list of float or ndarray): The photon energies where the integral will be evaluated
phi (float): X-ray emission elevation angle in degrees.
epsrel (float): The tolerance parameter used in numeric integration.
monitor: A function to be called after each iteration with arguments finished_count, total_count. See for example :obj:`console_monitor`.
z (int): Atomic number of the material.
Returns:
:obj:`Spectrum`: The calculated spectrum
"""
# Prepare spectrum
s = Spectrum()
s.x = mesh
mesh_len = len(mesh)
# Prepare integrand function
fluence = get_fluence(e_0)
cs = get_cs(e_0, z=z)
mu = get_mu_csda(e_0, z=z)
# quad may raise warnings about the numerical integration method,
# which are related to the estimated accuracy. Since this is not relevant,
# they are suppressed.
warnings.simplefilter("ignore")
for i, e_g in enumerate(s.x):
s.y.append(integrate_source(fluence, cs, mu, theta, e_g, e_0, phi=phi, epsrel=epsrel, z=z))
if monitor is not None:
monitor(i + 1, mesh_len)
if z == 74:
add_char_radiation(s)
return s
def calculate_spectrum(e_0, theta, e_min, num_e, phi=0.0, epsrel=0.2, monitor=console_monitor, z=74):
"""
Calculates the x-ray spectrum for given parameters.
Characteristic peaks are also calculated by add_char_radiation, which is called with the default parameters.
Args:
e_0 (float): Electron kinetic energy in keV
theta (float): X-ray emission angle in degrees, the normal being at 90º
e_min (float): Minimum kinetic energy to calculate in the spectrum in keV
num_e (int): Number of points to calculate in the spectrum
phi (float): X-ray emission elevation angle in degrees.
epsrel (float): The tolerance parameter used in numeric integration.
monitor: A function to be called after each iteration with arguments finished_count, total_count. See for example :obj:`console_monitor`.
z (int): Atomic number of the material.
Returns:
:obj:`Spectrum`: The calculated spectrum
"""
return calculate_spectrum_mesh(e_0, theta, np.linspace(e_min, e_0, num=num_e, endpoint=True), phi=phi,
epsrel=epsrel, monitor=monitor, z=z)
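# Illustrative usage sketch, not part of the upstream module: a coarse 100 kVp
# spectrum at 12 degrees, filtered by 2 mm of aluminium (z=13, assuming that
# attenuation data is available), normalized to unit dose and queried for its
# dose-weighted half-value layer in aluminium. Few mesh points and a loose
# tolerance keep the run short; the result is only a rough estimate.
def _calculate_spectrum_example():
    s = calculate_spectrum(100.0, 12.0, 3.0, 20, epsrel=0.5, monitor=None)
    mu_al = get_mu(13)
    fluence_to_dose = get_fluence_to_dose()
    s.attenuate(0.2, mu_al)  # 2 mm Al filtration
    s.set_norm(value=1.0, weight=fluence_to_dose)
    return s.hvl(0.5, fluence_to_dose, mu_al)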
def cli():
import argparse
import sys
parser = argparse.ArgumentParser(description='Calculate a bremsstrahlung spectrum.')
parser.add_argument('e_0', metavar='E0', type=float,
help='Electron kinetic energy in keV')
parser.add_argument('theta', metavar='theta', type=float, default=12,
help="X-ray emission angle in degrees, the anode's normal being at 90º.")
parser.add_argument('--phi', metavar='phi', type=float, default=0,
help="X-ray emission altitude in degrees, the anode's normal being at 0º.")
parser.add_argument('--z', metavar='z', type=int, default=74,
help="Atomic number of the material (characteristic radiation is only available for z=74).")
parser.add_argument('--e_min', metavar='e_min', type=float, default=3.0,
help="Minimum kinetic energy in keV in the bremsstrahlung calculation.")
parser.add_argument('--n_points', metavar='n_points', type=int, default=50,
help="Number of points used in the bremsstrahlung calculation.")
parser.add_argument('--mesh', metavar='e_i', type=float, nargs='+',
help="Energy mesh where the bremsstrahlung will be calculated. "
"Overrides e_min and n_points parameters.")
parser.add_argument('--epsrel', metavar='tolerance', type=float, default=0.5,
help="Numerical tolerance in integration.")
parser.add_argument('-o', '--output', metavar='path', type=str,
help="Output file. Available formats are csv, xlsx, and pkl, selected by the file extension. "
"pkl appends objects using the pickle module. Note you have to import the Spectrum class "
" INTO THE NAMESPACE (e.g., from xpecgen.xpecgen import Spectrum) to load them. "
"If this argument is not provided, points are written to the standard output and "
"calculation monitor is not displayed.")
parser.add_argument('--overwrite', action="store_true",
help="If this flag is set and the output is a pkl file, overwrite its content instead of "
"appending.")
args = parser.parse_args()
if args.output is not None:
if "." not in args.output:
print("Output file format unknown", file=sys.stderr)
exit(-1)
else:
ext = args.output.split(".")[-1].lower()
if ext not in ["csv", "xlsx", "pkl"]:
print("Output file format unknown", file=sys.stderr)
exit(-1)
monitor = console_monitor
else:
monitor = None
if args.mesh is None:
mesh = np.linspace(args.e_min, args.e_0, num=args.n_points, endpoint=True)
else:
mesh = args.mesh
s = calculate_spectrum_mesh(args.e_0, args.theta, mesh, phi=args.phi, epsrel=args.epsrel, monitor=monitor, z=args.z)
x2, y2 = s.get_points()
if args.output is None:
[print("%.6g, %.6g" % (x, y)) for x, y in zip(x2, y2)]
elif ext == "csv":
s.export_csv(args.output)
elif ext == "xlsx":
s.export_xlsx(args.output)
elif ext == "pkl":
import pickle
print(args.overwrite)
if args.overwrite:
mode = "wb"
else:
mode = "ab"
with open(args.output, mode) as output:
pickle.dump(s, output, pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
cli()
| gpl-3.0 |
rempferg/espresso | samples/python/visualization.py | 4 | 6984 | #
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import thermostat
from espressomd import code_info
from espressomd import integrate
from espressomd import visualization
import numpy
from matplotlib import pyplot
from threading import Thread
print("""
=======================================================
=                  visualization.py                   =
=======================================================
Program Information:""")
print(code_info.features())
dev = "cpu"
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.001
system.skin = 0.4
#es._espressoHandle.Tcl_Eval('thermostat langevin 1.0 1.0')
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 10
int_n_times = 50000
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
system.analysis.distto(0)
print("Simulate {} particles in a cubic simulation box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.mindist()
print("Start with minimal distance {}".format(act_min_dist))
system.max_num_cells = 2744
#Switch between openGl/Mayavi
visualizer = visualization.mayaviLive(system)
#visualizer = visualization.openGLLive(system)
#############################################################
# Warmup Integration #
#############################################################
# open Observable file
obs_file = open("pylj_liquid.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
integrate.integrate(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.mindist()
# print("\rrun %d at time=%f (LJ cap=%f) min dist = %f\r" % (i,system.time,lj_cap,act_min_dist), end=' ')
i += 1
# Increase LJ cap
lj_cap = lj_cap + 10
system.non_bonded_inter.set_force_cap(lj_cap)
visualizer.update()
# Just to see what else we may get from the c code
print("""
ro variables:
cell_grid {0.cell_grid}
cell_size {0.cell_size}
local_box_l {0.local_box_l}
max_cut {0.max_cut}
max_part {0.max_part}
max_range {0.max_range}
max_skin {0.max_skin}
n_nodes {0.n_nodes}
n_part {0.n_part}
n_part_types {0.n_part_types}
periodicity {0.periodicity}
transfer_rate {0.transfer_rate}
verlet_reuse {0.verlet_reuse}
""".format(system))
# write parameter file
set_file = open("pylj_liquid.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
(box_l, system.time_step, system.skin))
#############################################################
# Integration #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# print initial energies
energies = system.analysis.energy()
print(energies)
plot, = pyplot.plot([0],[energies['total']], label="total")
pyplot.xlabel("Time")
pyplot.ylabel("Energy")
pyplot.legend()
pyplot.show(block=False)
j = 0
def main_loop(step):
    global energies
    print("run %d at time=%f " % (step, system.time))
integrate.integrate(int_steps)
visualizer.update()
energies = system.analysis.energy()
print(energies)
plot.set_xdata(numpy.append(plot.get_xdata(), system.time))
plot.set_ydata(numpy.append(plot.get_ydata(), energies['total']))
obs_file.write('{ time %s } %s\n' % (system.time, energies))
linear_momentum = system.analysis.analyze_linear_momentum()
print(linear_momentum)
def main_thread():
    for i in range(0, int_n_times):
        main_loop(i)
last_plotted = 0
def update_plot():
global last_plotted
current_time = plot.get_xdata()[-1]
if last_plotted == current_time:
return
last_plotted = current_time
pyplot.xlim(0, plot.get_xdata()[-1])
pyplot.ylim(plot.get_ydata().min(), plot.get_ydata().max())
pyplot.draw()
t = Thread(target=main_thread)
t.daemon = True
t.start()
visualizer.registerCallback(update_plot, interval=2000)
visualizer.start()
# write end configuration
end_file = open("pylj_liquid.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
end_file.write("{ particles {id pos type} }")
for i in range(n_part):
end_file.write("%s\n" % system.part[i].pos)
# id & type not working yet
obs_file.close()
set_file.close()
end_file.close()
# terminate program
print("\nFinished.")
| gpl-3.0 |
uzh-rpg/rpg_svo | svo_analysis/src/svo_analysis/analyse_trajectory.py | 17 | 8764 | #!/usr/bin/python
import os
import yaml
import argparse
import numpy as np
import matplotlib.pyplot as plt
import svo_analysis.tum_benchmark_tools.associate as associate
import vikit_py.transformations as transformations
import vikit_py.align_trajectory as align_trajectory
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
def plot_translation_error(timestamps, translation_error, results_dir):
fig = plt.figure(figsize=(8, 2.5))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='position drift [mm]', xlim=[0,timestamps[-1]-timestamps[0]+4])
ax.plot(timestamps-timestamps[0], translation_error[:,0]*1000, 'r-', label='x')
ax.plot(timestamps-timestamps[0], translation_error[:,1]*1000, 'g-', label='y')
ax.plot(timestamps-timestamps[0], translation_error[:,2]*1000, 'b-', label='z')
ax.legend()
fig.tight_layout()
fig.savefig(results_dir+'/translation_error.pdf')
def plot_rotation_error(timestamps, rotation_error, results_dir):
fig = plt.figure(figsize=(8, 2.5))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='orientation drift [rad]', xlim=[0,timestamps[-1]-timestamps[0]+4])
ax.plot(timestamps-timestamps[0], rotation_error[:,0], 'r-', label='yaw')
ax.plot(timestamps-timestamps[0], rotation_error[:,1], 'g-', label='pitch')
ax.plot(timestamps-timestamps[0], rotation_error[:,2], 'b-', label='roll')
ax.legend()
fig.tight_layout()
fig.savefig(results_dir+'/orientation_error.pdf')
def analyse_synthetic_trajectory(results_dir):
data = np.loadtxt(os.path.join(results_dir, 'translation_error.txt'))
timestamps = data[:,0]
translation_error = data[:,1:4]
plot_translation_error(timestamps, translation_error, results_dir)
# plot orientation error
data = np.loadtxt(os.path.join(results_dir, 'orientation_error.txt'))
timestamps = data[:,0]
orientation_error = data[:,1:4]
plot_rotation_error(timestamps, orientation_error, results_dir)
def analyse_optitrack_trajectory_with_hand_eye_calib(results_dir, params, n_align_frames = 200):
print('loading hand-eye-calib')
T_cm_quat = np.array([params['hand_eye_calib']['Tcm_qx'],
params['hand_eye_calib']['Tcm_qy'],
params['hand_eye_calib']['Tcm_qz'],
params['hand_eye_calib']['Tcm_qw']])
T_cm_tran = np.array([params['hand_eye_calib']['Tcm_tx'],
params['hand_eye_calib']['Tcm_ty'],
params['hand_eye_calib']['Tcm_tz']])
T_cm = get_rigid_body_trafo(T_cm_quat, T_cm_tran)
T_mc = transformations.inverse_matrix(T_cm)
t_es, p_es, q_es, t_gt, p_gt, q_gt = load_dataset(results_dir, params['cam_delay'])
# align Sim3 to get scale
print('align Sim3 using '+str(n_align_frames)+' first frames.')
scale,rot,trans = align_trajectory.align_sim3(p_gt[0:n_align_frames,:], p_es[0:n_align_frames,:])
    print('scale = ' + str(scale))
# get trafo between (v)ision and (o)ptitrack frame
    print(q_gt[0,:])
    print(p_gt[0,:])
T_om = get_rigid_body_trafo(q_gt[0,:], p_gt[0,:])
T_vc = get_rigid_body_trafo(q_es[0,:], scale*p_es[0,:])
T_cv = transformations.inverse_matrix(T_vc)
T_ov = np.dot(T_om, np.dot(T_mc, T_cv))
    print('T_ov = ' + str(T_ov))
# apply transformation to estimated trajectory
q_es_aligned = np.zeros(np.shape(q_es))
rpy_es_aligned = np.zeros(np.shape(p_es))
rpy_gt = np.zeros(np.shape(p_es))
p_es_aligned = np.zeros(np.shape(p_es))
for i in range(np.shape(p_es)[0]):
T_vc = get_rigid_body_trafo(q_es[i,:],p_es[i,:])
T_vc[0:3,3] *= scale
T_om = np.dot(T_ov, np.dot(T_vc, T_cm))
p_es_aligned[i,:] = T_om[0:3,3]
q_es_aligned[i,:] = transformations.quaternion_from_matrix(T_om)
rpy_es_aligned[i,:] = transformations.euler_from_quaternion(q_es_aligned[i,:], 'rzyx')
rpy_gt[i,:] = transformations.euler_from_quaternion(q_gt[i,:], 'rzyx')
# plot position error (drift)
translation_error = (p_gt-p_es_aligned)
plot_translation_error(t_es, translation_error, results_dir)
# plot orientation error (drift)
orientation_error = (rpy_gt - rpy_es_aligned)
plot_rotation_error(t_es, orientation_error, results_dir)
# plot scale drift
    motion_gt = np.diff(p_gt, axis=0)
    motion_es = np.diff(p_es_aligned, axis=0)
dist_gt = np.sqrt(np.sum(np.multiply(motion_gt,motion_gt),1))
dist_es = np.sqrt(np.sum(np.multiply(motion_es,motion_es),1))
fig = plt.figure(figsize=(8,2.5))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='scale change [\%]', xlim=[0,t_es[-1]+4])
scale_drift = np.divide(dist_es,dist_gt)*100-100
ax.plot(t_es, scale_drift, 'b-')
fig.tight_layout()
fig.savefig(results_dir+'/scale_drift.pdf')
# plot trajectory
fig = plt.figure()
ax = fig.add_subplot(111, title='trajectory', aspect='equal', xlabel='x [m]', ylabel='y [m]')
ax.plot(p_es_aligned[:,0], p_es_aligned[:,1], 'b-', label='estimate')
ax.plot(p_gt[:,0], p_gt[:,1], 'r-', label='groundtruth')
ax.plot(p_es_aligned[0:n_align_frames,0], p_es_aligned[0:n_align_frames,1], 'g-', linewidth=2, label='aligned')
ax.legend()
fig.tight_layout()
fig.savefig(results_dir+'/trajectory.pdf')
def analyse_trajectory(results_dir, n_align_frames = 200, use_hand_eye_calib = True):
params = yaml.load(open(os.path.join(results_dir, 'dataset_params.yaml'),'r'))
if params['dataset_is_blender']:
analyse_synthetic_trajectory(results_dir)
elif use_hand_eye_calib:
analyse_optitrack_trajectory_with_hand_eye_calib(results_dir, params, n_align_frames)
else:
t_es, p_es, q_es, t_gt, p_gt, q_gt = load_dataset(results_dir, params['cam_delay'])
scale,rot,trans = align_trajectory.align_sim3(p_gt[0:n_align_frames,:], p_es[0:n_align_frames,:])
p_es_aligned = np.zeros(np.shape(p_es))
for i in range(np.shape(p_es)[0]):
p_es_aligned[i,:] = scale*rot.dot(p_es[i,:]) + trans
# plot position error (drift)
translation_error = (p_gt-p_es_aligned)
plot_translation_error(t_es, translation_error, results_dir)
def get_rigid_body_trafo(quat,trans):
T = transformations.quaternion_matrix(quat)
T[0:3,3] = trans
return T
def load_dataset(results_dir, cam_delay):
print('loading dataset in '+results_dir)
print('cam_delay = '+str(cam_delay))
data_gt = open(os.path.join(results_dir, 'groundtruth.txt')).read()
lines = data_gt.replace(","," ").replace("\t"," ").split("\n")
data_gt = np.array([[np.float(v.strip()) for i,v in enumerate(line.split(" ")) if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"])
#data_gt = np.array([[np.float(v.strip()) for i,v in enumerate(line.split(" ")) if i != 1 and v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"])
data_gt = [(float(l[0]),l[1:]) for l in data_gt]
data_gt = dict(data_gt)
data_es = open(os.path.join(results_dir, 'traj_estimate.txt')).read()
lines = data_es.replace(","," ").replace("\t"," ").split("\n")
data_es = np.array([[np.float(v.strip()) for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"])
data_es = [(float(l[0]),l[1:]) for l in data_es]
data_es = dict(data_es)
matches = associate.associate(data_gt, data_es, -cam_delay, 0.02)
p_gt = np.array([[np.float(value) for value in data_gt[a][0:3]] for a,b in matches])
q_gt = np.array([[np.float(value) for value in data_gt[a][3:7]] for a,b in matches])
p_es = np.array([[np.float(value) for value in data_es[b][0:3]] for a,b in matches])
q_es = np.array([[np.float(value) for value in data_es[b][3:7]] for a,b in matches])
t_gt = np.array([np.float(a) for a,b in matches])
t_es = np.array([np.float(b) for a,b in matches])
# set start time to zero
start_time = min(t_es[0], t_gt[0])
t_es -= start_time
t_gt -= start_time
return t_es, p_es, q_es, t_gt, p_gt, q_gt
if __name__ == '__main__':
# parse command line
parser = argparse.ArgumentParser(description='''
Analyse trajectory
''')
parser.add_argument('results_dir', help='folder with the results')
parser.add_argument('--use_hand_eye_calib', help='', action='store_true')
parser.add_argument('--n_align_frames', help='', default=200)
args = parser.parse_args()
print('analyse trajectory for dataset: '+str(args.results_dir))
analyse_trajectory(args.results_dir,
n_align_frames = int(args.n_align_frames),
use_hand_eye_calib = args.use_hand_eye_calib) | gpl-3.0 |
manub686/atomix | r2cmplr/_program_analyzer.py | 1 | 19361 | '''
Atomix project, _program_analyzer.py, (TODO: summary)
Copyright (c) 2015 Stanford University
Released under the Apache License v2.0. See the LICENSE file for details.
Author(s): Manu Bansal
'''
from _db import *
from _codegen_write_out import *
from _util import *
from collections import Counter
import networkx as nx
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
DELTA = 1e-9
class ProgramConstructor:
#-----------------------------
# construct the data flowgraph
# (atoms, fifos and wires)
#-----------------------------
#add all atoms in the program as nodes of the dfg
def add_atoms(self, ATOM_DB):
self.logger.info("adding atoms")
P = self.P
for row in ATOM_DB:
print row
atom, block, core, inp_ports, out_ports = tuple(row)
inp_ports_list = []
out_ports_list = []
if inp_ports:
inp_ports_list = inp_ports.split(',')
if out_ports:
out_ports_list = out_ports.split(',')
print atom, block, inp_ports_list, out_ports_list
P.add_node(atom, block=block, inp_ports=inp_ports_list, out_ports=out_ports_list)
atoms = [row[0] for row in ATOM_DB]
self.atoms = atoms
self.ATOM_DB = ATOM_DB
#add all fifos in the program as nodes of the dfg
def add_fifos(self, FIFO_DB):
self.logger.info("adding fifos")
P = self.P
for row in FIFO_DB:
#print row
fifo, nbufs, fifotype, core = tuple(row)
print fifo, nbufs, core
P.add_node(fifo, nbufs=int(nbufs))
fifos = [row[0] for row in FIFO_DB]
self.fifos = fifos
self.FIFO_DB = FIFO_DB
#add all wires as edges going between atoms and fifos in the dfg
def add_wires(self, WIRE_DB):
self.logger.info("adding wires")
P = self.P
self.edges = []
for row in WIRE_DB:
print row
(wired_atom, wired_fifos),(_, orig_wired_fifos) = tuple(row)
print "wired_fifos =|%s|" % wired_fifos
wired_fifos = wired_fifos.split(',')
orig_wired_fifos = orig_wired_fifos.split(',')
print "wired_fifos =|%s|" % wired_fifos
atom = P.node[wired_atom]
inp_ports = atom["inp_ports"]
out_ports = atom["out_ports"]
ninp = len(inp_ports)
nout = len(out_ports)
print inp_ports, out_ports, ninp, nout
wired_inps = wired_fifos[0:ninp]
wired_outs = wired_fifos[ninp:]
orig_wired_inps = orig_wired_fifos[0:ninp]
orig_wired_outs = orig_wired_fifos[ninp:]
#print wired_atom, wired_fifos, atom
#print wired_atom, wired_fifos, inp_ports, out_ports, "wired_inps:", wired_inps, "wired_outs:", wired_outs
print wired_atom, "wired_inps:", wired_inps, "wired_outs:", wired_outs
print wired_atom, "orig_wired_inps:", orig_wired_inps, "orig_wired_outs:", orig_wired_outs
#if wired_atom == 'jumpToRxDataFinish1':
# raw_input()
#wired_inp_fifos = [P.node[fifo] for fifo in wired_inps]
#wired_out_fifos = [P.node[fifo] for fifo in wired_outs]
#inp_edges = [(fifo,atom) for fifo in wired_inp_fifos]
#out_edges = [(atom,fifo) for fifo in wired_out_fifos]
# if wired_fifos != orig_wired_fifos:
# raw_input()
inp_edges = []
inp_edges_weighted = []
if wired_inps != orig_wired_inps:
for fifo, orig_fifo in zip(wired_inps, orig_wired_inps):
#if orig_fifo.endswith(">"):
#print fifo, orig_fifo
#raw_input()
if fifo != orig_fifo:
op = orig_fifo[-1]
print "op = ", op
#raw_input()
if op == "+" or op == "*":
inp_edges.append((fifo,wired_atom,1))
inp_edges.append((wired_atom,fifo,1))
elif op == ">":
inp_edges_weighted.append((fifo,wired_atom,0.5))
#print inp_edges_weighted
#raw_input()
elif op == "<":
inp_edges_weighted.append((fifo,wired_atom,0.5))
elif op == ")":
inp_edges_weighted.append((fifo,wired_atom,0.4))
elif op == "(":
inp_edges_weighted.append((fifo,wired_atom,0.6))
elif op == "[":
inp_edges_weighted.append((fifo,wired_atom,0.6))
inp_edges_weighted.append((wired_atom,fifo,1))
else:
print "unhandled case"
sys.exit(1)
else:
inp_edges.append((fifo,wired_atom,1))
else:
inp_edges = [(fifo,wired_atom,1) for fifo in wired_inps]
out_edges = []
out_edges_weighted = []
if wired_outs != orig_wired_outs:
for fifo, orig_fifo in zip(wired_outs, orig_wired_outs):
if fifo != orig_fifo:
op = orig_fifo[-1]
print "op = ", op
if op == "+" or op == "*":
out_edges.append((fifo,wired_atom,1))
out_edges.append((wired_atom,fifo,1))
elif op == ">":
out_edges_weighted.append((wired_atom,fifo,0.5))
elif op == "<":
out_edges_weighted.append((wired_atom,fifo,0.5))
elif op == ")":
out_edges_weighted.append((wired_atom,fifo,0.4))
elif op == "(":
out_edges_weighted.append((wired_atom,fifo,0.6))
else:
print "unhandled case"
sys.exit(1)
else:
out_edges.append((wired_atom,fifo,1))
else:
out_edges = [(wired_atom,fifo,1) for fifo in wired_outs]
edges = inp_edges + out_edges
weighted_edges = inp_edges_weighted + out_edges_weighted
edges += weighted_edges
print edges
ws = [w for u,v,w in edges]
#if any([w != 1 for w in ws]):
# raw_input()
#P.add_edges_from(edges)
#P.add_weighted_edges_from(edges)
#P.add_weighted_edges_from(weighted_edges)
P.add_weighted_edges_from(edges)
#self.edges += edges
#self.weighted_edges += weighted_edges
self.WIRE_DB = WIRE_DB
def add_conf(self, CONF_DB):
for row in CONF_DB:
print row
self.CONF_DB = CONF_DB
#---------------------------------
# construct the control flow graph
# (actions and states)
#---------------------------------
#for each state, collect actions (flowgraphs) and statep's belonging to same
#state --> these are the nodes of the flowgraph graphs
def add_actions(self, AXN_DB):
self.logger.info("adding actions")
#P = self.P
#for row in AXN_DB:
# print row
self.AXN_DB = AXN_DB
adb = AXN_DB
adbd = {}
adbd['noaxn'] = []
for row in adb:
axn, atoms = tuple(row)
adbd[axn] = atoms
self.AXN_DB_DICT = adbd
#list all states
def add_states(self, STATE_DB, initStateName):
self.logger.info("adding states")
for state in STATE_DB:
print state, STATE_DB[state]
self.STATE_DB = STATE_DB
self.initStateName = initStateName
#---------------------------------
# analysis methods
#---------------------------------
def analyze(self):
self.find_dx_atoms()
#self.find_action_subgraphs()
#self.find_state_subgraphs()
paths = []
### TODO: move these out into a configuration file (maybe debug.txt)
### paths.append(['staRxPktInit', 'staRxLTFProc', 'staRxPktDetect', 'staRxPLCPDecodeSpec', 'staRxContinue', 'staRxFinish'])
### paths.append(['staRxPktInit', 'staRxLTFProc', 'staRxPktDetect', 'staRxPLCPDecodeSpec', 'staRxContinue', 'staRxFinish'])
#### paths.append(['staRxPktInit', 'staRxLTFProc', 'staRxPktDetect', 'staRxPLCPDecodeSpec', 'staRxDataDecode54m_setup', \
#### 'staRxDataDecode54m_c', 'staRxDataDecode54m_H', 'staRxDataDecode54m_Mp', 'staRxDataDecode54m_M', 'staRxDataDecode54m_M', \
#### 'staRxDataDecode54m_T', 'staRxDataDecode54m_f', 'staRxDataFinish', 'staTxAckInit', 'staTxAckPreambleSignal', \
#### 'staTxAck', 'staTxAckFinal', 'staRxFinish'])
### paths.append(['staRxPktInit', 'staRxLTFProc', 'staRxPktDetect', 'staRxPLCPDecodeSpec', 'staRxDataDecode54m_setup', \
### 'staRxDataDecode54m_H', 'staRxDataDecode54m_Mp', 'staRxDataDecode54m_M', 'staRxDataDecode54m_M', \
### 'staRxDataDecode54m_T', 'staRxDataDecode54m_f', 'staRxDataFinish', 'staTxAckInit', 'staTxAckPreambleSignal', \
### 'staTxAck', 'staTxAckFinal', 'staRxFinish'])
statepathnames = []
state_paths_index_file = "__data_conservation_state_paths_index.txt"
f = open(state_paths_index_file, "w")
for i, path in enumerate(paths):
#state_path_name = ""
f.write("%d %s\n" % (i, path))
for state in path:
#state_path_name += "+%s" % state
#statepathnames.append(state_path_name)
state_path_name = "path_%d" % i
subgraph = self.find_statepath_subgraph(path, state_path_name, True)
subgraph_name = state_path_name
mismatched_fifos = self.check_data_conservation(subgraph, subgraph_name)
self.draw_mismatched_fifos(subgraph, subgraph_name, mismatched_fifos)
f.close()
def draw_mismatched_fifos(self, subgraph, subgraph_name, mismatched_fifos):
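        # For each mismatched fifo, extract just that fifo's in- and out-edges
        # from the state-path subgraph and render them as a small per-fifo
        # diagnostic plot.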
for f in mismatched_fifos:
g = nx.MultiDiGraph()
in_edges = subgraph.in_edges(f,True,True)
out_edges = subgraph.out_edges(f,True,True)
edges = in_edges + out_edges
print "edges:", edges
g.add_edges_from(edges)
print "nodes:", g.nodes()
print "edges:", g.edges()
g_name = "__data_conservation_" + subgraph_name + "_mismatch_" + f + ".png"
self.draw_twocolor_(g, True, g_name, False, (10,10))
def check_data_conservation(self, G, subgraph_name):
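        # Data-conservation check: for every fifo node in G, compare the
        # weighted in-degree against the weighted out-degree (roughly, data
        # produced into vs. consumed from the fifo); fifos whose |in - out|
        # exceeds DELTA are reported and returned as mismatched.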
n = G.nodes()
atoms = set(self.atoms).intersection(n)
fifos = set(self.fifos).intersection(n)
din_f = G.in_degree(fifos,weight='weight')
dout_f = G.out_degree(fifos,weight='weight')
dinout = {}
#print din_f
#print dout_f
#print dinout
outfile = "__data_conservation_%s.txt" % subgraph_name
of = open(outfile, "w")
mismatched_fifos = []
for f in fifos:
dinout[f] = (din_f[f], dout_f[f], din_f[f] - dout_f[f])
ind,outd,diff = dinout[f]
#print "%40s" % f, dinout[f]
cur = ""
if abs(diff) > DELTA:
cur = "<<"
mismatched_fifos.append(f)
print "%40s %5.1f, %5.1f, %5.1f %s" % (f, ind,outd,diff,cur)
of.write("%40s %5.1f, %5.1f, %5.1f %s\n" % (f, ind,outd,diff,cur))
of.close()
#raw_input()
return mismatched_fifos
def find_atom_subgraph(self, atoms_set, count_multiplicity=False):
if not count_multiplicity:
return self.find_atom_subgraph_no_multiplicity(atoms_set)
else:
return self.find_atom_subgraph_with_multiplicity(atoms_set)
def find_atom_subgraph_with_multiplicity(self, atoms_set):
G = self.P
#atoms = self.atoms
#fifos = self.fifos
print "atoms_set (with multiplicity):", atoms_set
print "----------------------------------------"
subgraph = nx.MultiDiGraph()
for atom in atoms_set:
#preds = G.predecessors(atom)
#succs = G.successors(atom)
#edges = [(p,atom) for p in preds]
#print "edges:", edges
#edges += [(atom,s) for s in succs]
#print "edges:", edges
#subgraph.add_edges_from(edges)
in_edges = G.in_edges(atom, True, True)
out_edges = G.out_edges(atom, True, True)
print "in_edges:", in_edges
print "out_edges:", out_edges
edges = in_edges + out_edges
# weights = [d['weight'] for _,_,_,d in edges]
# if any([w != 1 for w in weights]):
# raw_input()
#if atom == "agcStateDrainer":
# raw_input()
#subgraph.add_weighted_edges_from(edges)
#subgraph.add_edges_from(edges)
weighted_edges = [(u,v,d['weight']) for u,v,_,d in edges]
subgraph.add_weighted_edges_from(weighted_edges)
            #note: adding (u,v,w) tuples instead of the raw edge objects discards
            #the per-edge keys that nx attaches to MultiDiGraph edges. this lets
            #the same full-graph edge be added to the subgraph multiple times; if
            #the edge keys were kept, re-adding an identically keyed edge would
            #update it in place rather than replicate it as desired.
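            # Illustration (assumed networkx MultiDiGraph semantics, hypothetical
            # node names): calling add_weighted_edges_from([("a", "f", 1)]) twice
            # stores two parallel a->f edges under fresh auto-generated keys,
            # whereas calling add_edges_from([("a", "f", 0, {"weight": 1})]) twice
            # reuses key 0 and only updates the existing edge.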
print subgraph.nodes()
print subgraph.edges()
print '---------- printed subgraph nodes and edges -----------'
return subgraph
def find_atom_subgraph_no_multiplicity(self, atoms_set):
G = self.P
#atoms = self.atoms
#fifos = self.fifos
print "atoms_set (with multiplicity):", atoms_set
print "----------------------------------------"
preds = []
succs = []
for atom in atoms_set:
preds += G.predecessors(atom)
succs += G.successors(atom)
expanded_atoms_set = atoms_set + preds + succs
print "expanded_atoms_set:", expanded_atoms_set
print "----------------------------------------"
c = Counter(expanded_atoms_set)
print "counts:", c
print "----------------------------------------"
        #raw_input()  # debug pause disabled; uncomment to pause here
subgraph = G.subgraph(expanded_atoms_set)
return subgraph
def find_statepath_subgraph(self, path=[], state_path_name="", count_multiplicity=False):
if not path:
return
#sdb = self.STATE_DB
state_path_atoms = []
for state in path:
atoms = self.find_state_atoms(state)
state_path_atoms += atoms
print "state_path_atoms:", state_path_atoms
print "--------- printed state path atoms-----------"
atoms = state_path_atoms
subgraph = self.find_atom_subgraph(atoms, count_multiplicity)
## self.draw_simple_(subgraph)
plotfile = "__%s.png" % state_path_name
show_labels = True
show = False
self.draw_twocolor_(subgraph, show_labels, plotfile, show)
#if state.startswith('...'):
#self.draw_twocolor_(subgraph, show_labels, plotfile, show)
return subgraph
def find_state_atoms(self, state):
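        # A state's atoms are the union of (a) the atoms of every flowgraph
        # (action) registered for the state, looked up via AXN_DB_DICT, and
        # (b) the atoms listed directly in the state's atom sequences.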
adbd = self.AXN_DB_DICT
sdb = self.STATE_DB
v = sdb[state]
sfgs_atoms = []
print state, v
#state_flowgraphs
sfgs_atomseqs = v.values()
sfgs = [sfg for sfg,_ in sfgs_atomseqs]
atomseqs = [atomseq for _,atomseq in sfgs_atomseqs]
print sfgs
print atomseqs
for fg in sfgs:
sfgs_atoms += adbd[fg]
state_atoms = []
for aseq in atomseqs:
state_atoms += aseq
print sfgs_atoms
print state_atoms
all_state_atoms = sfgs_atoms + state_atoms
print all_state_atoms
print "----printed all state atoms--------"
atoms = all_state_atoms
return atoms
def find_state_subgraphs(self):
sdb = self.STATE_DB
for state in sdb:
atoms = self.find_state_atoms(state)
subgraph = self.find_atom_subgraph(atoms)
## self.draw_simple_(subgraph)
plotfile = "__%s.png" % state
show_labels = True
show = False
self.draw_twocolor_(subgraph, show_labels, plotfile, show)
#if state.startswith('...'):
#self.draw_twocolor_(subgraph, show_labels, plotfile, show)
def find_action_subgraphs(self):
adb = self.AXN_DB
for row in adb:
print row
axn, atoms = tuple(row)
subgraph = self.find_atom_subgraph(atoms)
#print subgraph
## self.draw_simple_(subgraph)
plotfile = "__%s.png" % axn
show_labels = True
show = False
self.draw_twocolor_(subgraph, show_labels, plotfile, show)
#if axn.startswith('axnRxPLCPDecodeSpec'):
#self.draw_twocolor_(subgraph, show_labels, plotfile, show)
def find_dx_atoms(self):
## algorithm 1 ##
#this is problematic because some decision atoms feed into ff_kk* through
#transfer atoms, so they are not neighbors of ff_kk* nodes
#fifos = self.fifos
#kernel_fifos = [fifo for fifo in fifos if fifo.startswith("ff_kk")]
#print "kernel_fifos =", kernel_fifos
#dx_atoms = []
#for ff_kk in kernel_fifos:
# dx_atoms += G.predecessors(ff_kk)
#print dx_atoms
## algorithm 2 ##
        #this approach could become problematic if a decision atom outputs a void
        #type instead of the full Decision_t type. for now, it suffices. it would
        #be better to enforce a Decision_t output type for an atom to qualify as
        #a legitimate decision atom right in the language.
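        # The test below assumes each entry of the node attribute "out_ports"
        # is a "Type:portName" string, so a decision atom would carry an entry
        # such as (hypothetical) "Decision_t:decisionOut".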
G = self.P
atoms = self.atoms
for atom in atoms:
print atom, G.node[atom]
#raw_input()
dx_atoms = [atom for atom in atoms if any([p.split(':')[0] == 'Decision_t' for p in G.node[atom]["out_ports"]])]
print dx_atoms
#raw_input()
cdb = self.CONF_DB
cdbd = {}
for conf in cdb:
k, v = tuple(conf)
cdbd[k] = v
for dxa in dx_atoms:
print dxa, cdbd[dxa]
self.dx_atoms = dx_atoms
#pp = G.predecessors('jumpToRxDataFinish1')
#ss = G.successors('jumpToRxDataFinish1')
#print pp, ss
#---------------------------------
# rendering methods
#---------------------------------
def draw(self):
#self.draw_simple()
self.draw_twocolor()
def draw_simple_(self, G):
nx.draw(G)
plt.show()
def draw_simple(self):
nx.draw(self.P)
plt.savefig("__program.png")
#plt.show()
def flattened_edge_weights(self, G):
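        # Collapse parallel MultiDiGraph edges into a single edge whose weight
        # is the sum of the parallel weights, then build printable edge labels;
        # when both directions between an atom and a fifo are present the label
        # shows both, e.g. "a-->f: 1.0 f-->a: 2.0" (format produced below).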
edges_weights = {}
H = nx.DiGraph()
#for e in G.edges(G.nodes(), True, True):
#u, v, i, d = e
fifos = self.fifos
for e in G.edges():
print e
u, v = e
d = G.get_edge_data(u, v)
w = sum([vv['weight'] for vv in d.values()])
print u, v, d, w
H.add_edge(u,v,weight=w)
edges = H.edges(H.nodes(), True)
print edges
edge_weights = {}
for u, v, d in edges:
#for e in edges:
# print e
e = (u, v)
w = d['weight']
er = (v, u)
if er in edge_weights:
wr = edge_weights[er]
if v in fifos:
ws = "a-->f: %.1f f-->a: %s" % (w, wr)
else:
ws = "a-->f: %s f-->a: %.1f" % (wr, w)
edge_weights[er] = ws
else:
edge_weights[e] = "%.1f" % w
print edge_weights
return edge_weights
def draw_twocolor_(self, G, show_labels=False, plotfile="", show = False, figsize=(30,30)):
plt.figure(figsize=figsize)
nodes = G.nodes()
ns = set(nodes)
atoms = ns.intersection(set(self.atoms))
fifos = ns.intersection(set(self.fifos))
labels = {}
if show_labels:
for n in nodes:
labels[n] = n
#pos=nx.spring_layout(G) # positions for all nodes
#pos=nx.spectral_layout(G) # positions for all nodes
pos=nx.circular_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G,pos,
nodelist=atoms,
node_color='r',
node_size=1000,
alpha=0.5)
nx.draw_networkx_nodes(G,pos,
nodelist=fifos,
node_color='b',
node_size=1000,
alpha=0.5)
# edges
nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)
if labels:
#nx.draw_networkx_labels(G,pos,labels,font_size=16)
nx.draw_networkx_labels(G,pos,labels,font_size=10)
edge_labels = self.flattened_edge_weights(G)
nx.draw_networkx_edge_labels(G,pos,edge_labels)
plt.axis('off')
if plotfile:
plt.savefig(plotfile) # save as png
if show:
plt.show()
def draw_twocolor(self):
G = self.P
plotfile = "__program.png"
self.draw_twocolor_(G, False, plotfile)
#---------------------------------
# constructor
#---------------------------------
def __init__(self, logger):
self.logger = logger
#top-level program of the app
P = nx.MultiDiGraph()
self.P = P
class R1CodeGenerator:
def gencode_r1(self, pc):
#sys.exit(1)
pass
def __init__(self, logger):
self.logger = logger
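#---------------------------------
# standalone sketch (not part of the original tool): a minimal, hedged example
# of the weighted-degree data-conservation idea used by check_data_conservation,
# run on a toy MultiDiGraph. Node and fifo names here are hypothetical, and the
# tolerance is local to this demo (the tool's own DELTA is defined elsewhere in
# this module).
#---------------------------------
if __name__ == "__main__":
    import networkx as nx

    demo = nx.MultiDiGraph()
    # atom "a1" writes 1.0 unit into fifo "ff_x"; atoms "a2" and "a3" read
    # 0.5 units each, so ff_x balances. fifo "ff_y" receives 1.0 but only 0.4
    # is read, so it should be flagged as mismatched.
    demo.add_weighted_edges_from([
        ("a1", "ff_x", 1.0),
        ("ff_x", "a2", 0.5),
        ("ff_x", "a3", 0.5),
        ("a1", "ff_y", 1.0),
        ("ff_y", "a2", 0.4),
    ])
    demo_fifos = ["ff_x", "ff_y"]
    demo_delta = 1e-6
    for ff in demo_fifos:
        din = demo.in_degree(ff, weight='weight')
        dout = demo.out_degree(ff, weight='weight')
        diff = din - dout
        flag = "<<" if abs(diff) > demo_delta else ""
        print "%10s in=%.1f out=%.1f diff=%.1f %s" % (ff, din, dout, diff, flag)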
| apache-2.0 |